diff --git a/TESTING.asciidoc b/TESTING.asciidoc index a3f92d5db1232..ffd2d9605e721 100644 --- a/TESTING.asciidoc +++ b/TESTING.asciidoc @@ -166,7 +166,7 @@ systemProp.tests.jvms=8 ---------------------------- Its difficult to pick the "right" number here. Hypercores don't count for CPU -intensive tests and you should leave some slack for JVM-interal threads like +intensive tests and you should leave some slack for JVM-internal threads like the garbage collector. And you have to have enough RAM to handle each JVM. === Test compatibility. diff --git a/build.gradle b/build.gradle index 7e067b8997805..ea4ce45a0bc0c 100644 --- a/build.gradle +++ b/build.gradle @@ -496,7 +496,7 @@ allprojects { // otherwise the eclipse merging is *super confusing* tasks.eclipse.dependsOn(cleanEclipse, copyEclipseSettings) - // work arround https://github.com/gradle/gradle/issues/6582 + // work around https://github.com/gradle/gradle/issues/6582 tasks.eclipseProject.mustRunAfter tasks.cleanEclipseProject tasks.matching { it.name == 'eclipseClasspath' }.all { it.mustRunAfter { tasks.cleanEclipseClasspath } diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index cd6c97078b8ab..ae95bf88e970b 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -225,7 +225,7 @@ if (project != rootProject) { } /* - * We alread configure publication and we don't need or want this one that + * We already configure publication and we don't need or want this one that * comes from the java-gradle-plugin. 
*/ afterEvaluate { diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index c2741ed5819f4..b9d5f66199b22 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -150,7 +150,7 @@ class BuildPlugin implements Plugin { } String inFipsJvmScript = 'print(java.security.Security.getProviders()[0].name.toLowerCase().contains("fips"));' - boolean inFipsJvm = Boolean.parseBoolean(runJavascript(project, runtimeJavaHome, inFipsJvmScript)) + boolean inFipsJvm = Boolean.parseBoolean(runJavaScript(project, runtimeJavaHome, inFipsJvmScript)) // Build debugging info println '=======================================' @@ -431,28 +431,28 @@ class BuildPlugin implements Plugin { String versionInfoScript = 'print(' + 'java.lang.System.getProperty("java.vendor") + " " + java.lang.System.getProperty("java.version") + ' + '" [" + java.lang.System.getProperty("java.vm.name") + " " + java.lang.System.getProperty("java.vm.version") + "]");' - return runJavascript(project, javaHome, versionInfoScript).trim() + return runJavaScript(project, javaHome, versionInfoScript).trim() } /** Finds the parsable java specification version */ private static String findJavaSpecificationVersion(Project project, String javaHome) { String versionScript = 'print(java.lang.System.getProperty("java.specification.version"));' - return runJavascript(project, javaHome, versionScript) + return runJavaScript(project, javaHome, versionScript) } private static String findJavaVendor(Project project, String javaHome) { String vendorScript = 'print(java.lang.System.getProperty("java.vendor"));' - return runJavascript(project, javaHome, vendorScript) + return runJavaScript(project, javaHome, vendorScript) } /** Finds the parsable java specification version */ private static String findJavaVersion(Project project, String 
javaHome) { String versionScript = 'print(java.lang.System.getProperty("java.version"));' - return runJavascript(project, javaHome, versionScript) + return runJavaScript(project, javaHome, versionScript) } /** Runs the given javascript using jjs from the jdk, and returns the output */ - private static String runJavascript(Project project, String javaHome, String script) { + private static String runJavaScript(Project project, String javaHome, String script) { ByteArrayOutputStream stdout = new ByteArrayOutputStream() ByteArrayOutputStream stderr = new ByteArrayOutputStream() if (Os.isFamily(Os.FAMILY_WINDOWS)) { @@ -553,7 +553,7 @@ class BuildPlugin implements Plugin { RepositoryHandler repos = project.repositories if (System.getProperty("repos.mavenLocal") != null) { // with -Drepos.mavenLocal=true we can force checking the local .m2 repo which is - // useful for development ie. bwc tests where we install stuff in the local repository + // useful for development i.e. bwc tests where we install stuff in the local repository // such that we don't have to pass hardcoded files to gradle repos.mavenLocal() } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/FileContentsTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/FileContentsTask.groovy index 248083af5e0d5..e500187b1fbc5 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/FileContentsTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/FileContentsTask.groovy @@ -37,7 +37,7 @@ class FileContentsTask extends DefaultTask { Object contents /** - * The file to be built. Takes any objecct and coerces to a file. + * The file to be built. Takes any object and coerces to a file. 
*/ void setFile(Object file) { this.file = file as File diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy index 9fdb1b41ec0d2..5a4aa1b86900f 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy @@ -68,7 +68,7 @@ class PrecommitTasks { * (which provides NamingConventionsCheck) and :test:logger-usage * which provides the logger usage check. Since the build tools * don't use the logger usage check because they don't have any - * of Elaticsearch's loggers and :test:logger-usage actually does + * of Elasticsearch's loggers and :test:logger-usage actually does * use the NamingConventionsCheck we break the circular dependency * here. */ diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/TestingConventionsTasks.java b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/TestingConventionsTasks.java index 105deabfd40fd..10f7492b265ce 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/TestingConventionsTasks.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/TestingConventionsTasks.java @@ -233,7 +233,7 @@ private PatternFilterable getRandomizedTestingPatternSet(Task task) { Method getPatternSet = task.getClass().getMethod("getPatternSet"); return (PatternFilterable) getPatternSet.invoke(task); } catch (NoSuchMethodException e) { - throw new IllegalStateException("Expecte task to have a `patternSet` " + task, e); + throw new IllegalStateException("Expected task to have a `patternSet` " + task, e); } catch (IllegalAccessException | InvocationTargetException e) { throw new IllegalStateException("Failed to get pattern set from task" + task, e); } diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java 
b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java index fa4415bbe1e91..4fe4e9cda66a9 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java @@ -183,7 +183,7 @@ private void startElasticsearchProcess(File distroArtifact) { try { processBuilder.directory(workingDir); Map environment = processBuilder.environment(); - // Don't inherit anything from the environment for as that would lack reproductability + // Don't inherit anything from the environment for as that would lack reproducibility environment.clear(); if (javaHome != null) { environment.put("JAVA_HOME", getJavaHome().getAbsolutePath()); @@ -219,7 +219,7 @@ public String getTransportPortURI() { synchronized void stop(boolean tailLogs) { if (esProcess == null && tailLogs) { // This is a special case. If start() throws an exception the plugin will still call stop - // Another exception here would eat the orriginal. + // Another exception here would eat the original. return; } logger.info("Stopping `{}`, tailLogs: {}", this, tailLogs); diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java index 1fe8bec1902f6..8926f74ca39a7 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java @@ -311,7 +311,7 @@ private static void configureCleanupHooks(Project project) { shutdownExecutorService(); }); // When the Daemon is not used, or runs into issues, rely on a shutdown hook - // When the daemon is used, but does not work correctly and eventually dies off (e.x. due to non interruptable + // When the daemon is used, but does not work correctly and eventually dies off (e.x. 
due to non interruptible // thread in the build) process will be stopped eventually when the daemon dies. Runtime.getRuntime().addShutdownHook(new Thread(TestClustersPlugin::shutDownAllClusters)); } diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/VersionCollectionTests.java b/buildSrc/src/test/java/org/elasticsearch/gradle/VersionCollectionTests.java index d1b4e893ec6ad..b6c32bdbe4a10 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/VersionCollectionTests.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/VersionCollectionTests.java @@ -350,7 +350,7 @@ public void testCompareToAuthoritativeUnreleasedActuallyReleased() { vc.compareToAuthoritative(authoritativeReleasedVersions); } - public void testCompareToAuthoritativeNotReallyRelesed() { + public void testCompareToAuthoritativeNotReallyReleased() { List listOfVersions = asList("7.0.0", "7.0.1", "7.1.0", "7.1.1", "7.2.0", "7.3.0", "8.0.0"); List authoritativeReleasedVersions = Stream.of("7.0.0", "7.0.1") .map(Version::fromString) diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/test/GradleIntegrationTestCase.java b/buildSrc/src/test/java/org/elasticsearch/gradle/test/GradleIntegrationTestCase.java index ab29a33a62eda..75c168b1c228b 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/test/GradleIntegrationTestCase.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/test/GradleIntegrationTestCase.java @@ -160,11 +160,11 @@ public void assertOutputOnlyOnce(String output, String... 
text) { for (String each : text) { int i = output.indexOf(each); if (i == -1 ) { - fail("Expected \n```" + each + "```\nto appear at most once, but it didn't at all.\n\nOutout is:\n"+ output + fail("Expected \n```" + each + "```\nto appear at most once, but it didn't at all.\n\nOutput is:\n"+ output ); } if(output.indexOf(each) != output.lastIndexOf(each)) { - fail("Expected `" + each + "` to appear at most once, but it did multiple times.\n\nOutout is:\n"+ output); + fail("Expected `" + each + "` to appear at most once, but it did multiple times.\n\nOutput is:\n"+ output); } } } diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/test/JUnit3MethodProvider.java b/buildSrc/src/test/java/org/elasticsearch/gradle/test/JUnit3MethodProvider.java index 18871e16555ef..c17a122973907 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/test/JUnit3MethodProvider.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/test/JUnit3MethodProvider.java @@ -39,7 +39,7 @@ public Collection getTestMethods(Class suiteClass, ClassModel classMo Map methods = classModel.getMethods(); ArrayList result = new ArrayList<>(); for (MethodModel mm : methods.values()) { - // Skip any methods that have overrieds/ shadows. + // Skip any methods that have overrides/ shadows. if (mm.getDown() != null) continue; Method m = mm.element; diff --git a/buildSrc/src/testKit/elasticsearch-build-resources/build.gradle b/buildSrc/src/testKit/elasticsearch-build-resources/build.gradle index c87c097e6beb6..58183ac1e029f 100644 --- a/buildSrc/src/testKit/elasticsearch-build-resources/build.gradle +++ b/buildSrc/src/testKit/elasticsearch-build-resources/build.gradle @@ -32,7 +32,7 @@ task sample { task noConfigAfterExecution { dependsOn buildResources doLast { - println "This should cause an error because we are refferencing " + + println "This should cause an error because we are referencing " + "${buildResources.copy('checkstyle_suppressions.xml')} after the `buildResources` task has ran." 
} } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RethrottleRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RethrottleRequest.java index eb1c666a0cf1e..fd37ca60bc15e 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RethrottleRequest.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RethrottleRequest.java @@ -72,6 +72,6 @@ public float getRequestsPerSecond() { @Override public String toString() { - return "RethrottleRequest: taskID = " + taskId +"; reqestsPerSecond = " + requestsPerSecond; + return "RethrottleRequest: taskID = " + taskId +"; requestsPerSecond = " + requestsPerSecond; } } \ No newline at end of file diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/WatcherClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/WatcherClient.java index 9d75132a903c3..0c9cdbeea8662 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/WatcherClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/WatcherClient.java @@ -291,7 +291,7 @@ public ExecuteWatchResponse executeWatch(ExecuteWatchRequest request, RequestOpt * the docs for more. * @param request the request * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized - * @param listener the listener to be notifed upon request completion + * @param listener the listener to be notified upon request completion */ public void executeWatchAsync(ExecuteWatchRequest request, RequestOptions options, ActionListener listener) { restHighLevelClient.performRequestAsyncAndParseEntity(request, WatcherRequestConverters::executeWatch, options, diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/common/ProtocolUtils.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/common/ProtocolUtils.java index 9181f4f0c0bbd..08fc8f8dcb5ec 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/common/ProtocolUtils.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/common/ProtocolUtils.java @@ -30,7 +30,7 @@ public final class ProtocolUtils { /** * Implements equals for a map of string arrays * - * The map of string arrays is used in some XPack protocol classes but does't work with equal. + * The map of string arrays is used in some XPack protocol classes but doesn't work with equal. */ public static boolean equals(Map a, Map b) { if (a == null) { @@ -61,7 +61,7 @@ public static boolean equals(Map a, Map b) { /** * Implements hashCode for map of string arrays * - * The map of string arrays does't work with hashCode. + * The map of string arrays doesn't work with hashCode. 
*/ public static int hashCode(Map a) { int hash = 0; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/graph/GraphExploreRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/graph/GraphExploreRequest.java index 4d2a000a00c89..d1e931de0a99e 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/graph/GraphExploreRequest.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/graph/GraphExploreRequest.java @@ -246,7 +246,7 @@ public boolean returnDetailedInfo() { /** * Add a stage in the graph exploration. Each hop represents a stage of - * querying elasticsearch to identify terms which can then be connnected to + * querying elasticsearch to identify terms which can then be connected to * other terms in a subsequent hop. * * @param guidingQuery diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/graph/GraphExploreResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/graph/GraphExploreResponse.java index dddc4bedfe466..d6995ba1e90b7 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/graph/GraphExploreResponse.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/graph/GraphExploreResponse.java @@ -167,7 +167,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - "GraphExploreResponsenParser", true, + "GraphExploreResponseParser", true, args -> { GraphExploreResponse result = new GraphExploreResponse(); result.vertices = new HashMap<>(); diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/graph/Hop.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/graph/Hop.java index 83196aada7061..1f5332d96e9e8 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/graph/Hop.java +++ 
b/client/rest-high-level/src/main/java/org/elasticsearch/client/graph/Hop.java @@ -45,7 +45,7 @@ *

* Optionally, each hop can contain a "guiding query" that further limits the set of documents considered. * In our weblog example above we might choose to constrain the second hop to only look at log records that - * had a reponse code of 404. + * had a response code of 404. *

*

* If absent, the list of {@link VertexRequest}s is inherited from the prior Hop's list to avoid repeating diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/GetLifecyclePolicyResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/GetLifecyclePolicyResponse.java index fc007cb5aebd4..cd080035eb6e4 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/GetLifecyclePolicyResponse.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/GetLifecyclePolicyResponse.java @@ -66,8 +66,8 @@ public static GetLifecyclePolicyResponse fromXContent(XContentParser parser) thr while (!parser.isClosed()) { if (parser.currentToken() == XContentParser.Token.START_OBJECT) { String policyName = parser.currentName(); - LifecyclePolicyMetadata policyDefinion = LifecyclePolicyMetadata.parse(parser, policyName); - policies.put(policyName, policyDefinion); + LifecyclePolicyMetadata policyDefinition = LifecyclePolicyMetadata.parse(parser, policyName); + policies.put(policyName, policyDefinition); } else { parser.nextToken(); } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indices/FreezeIndexRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indices/FreezeIndexRequest.java index d78f2d533ab87..7e7a9d090949d 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/indices/FreezeIndexRequest.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indices/FreezeIndexRequest.java @@ -70,7 +70,7 @@ public void setIndicesOptions(IndicesOptions indicesOptions) { } /** - * Returns the wait for active shard cound or null if the default should be used + * Returns the wait for active shard count or null if the default should be used */ public ActiveShardCount getWaitForActiveShards() { return waitForActiveShards; diff --git 
a/client/rest-high-level/src/main/java/org/elasticsearch/client/indices/UnfreezeIndexRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indices/UnfreezeIndexRequest.java index 1e2d7cded7129..5a3c01d1fc3ec 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/indices/UnfreezeIndexRequest.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indices/UnfreezeIndexRequest.java @@ -70,7 +70,7 @@ public void setIndicesOptions(IndicesOptions indicesOptions) { } /** - * Returns the wait for active shard cound or null if the default should be used + * Returns the wait for active shard count or null if the default should be used */ public ActiveShardCount getWaitForActiveShards() { return waitForActiveShards; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/JobUpdate.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/JobUpdate.java index 15499a650439d..abe435b79f5a8 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/JobUpdate.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/JobUpdate.java @@ -73,7 +73,7 @@ private JobUpdate(String jobId, @Nullable List groups, @Nullable String @Nullable List detectorUpdates, @Nullable ModelPlotConfig modelPlotConfig, @Nullable AnalysisLimits analysisLimits, @Nullable TimeValue backgroundPersistInterval, @Nullable Long renormalizationWindowDays, @Nullable Long resultsRetentionDays, - @Nullable Long modelSnapshotRetentionDays, @Nullable List categorisationFilters, + @Nullable Long modelSnapshotRetentionDays, @Nullable List categorizationFilters, @Nullable Map customSettings) { this.jobId = jobId; this.groups = groups; @@ -85,7 +85,7 @@ private JobUpdate(String jobId, @Nullable List groups, @Nullable String this.backgroundPersistInterval = backgroundPersistInterval; this.modelSnapshotRetentionDays = modelSnapshotRetentionDays; 
this.resultsRetentionDays = resultsRetentionDays; - this.categorizationFilters = categorisationFilters; + this.categorizationFilters = categorizationFilters; this.customSettings = customSettings; } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/process/DataCounts.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/process/DataCounts.java index 7afef0785fe38..2ef3c6725bf87 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/process/DataCounts.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/process/DataCounts.java @@ -307,7 +307,7 @@ public Date getLatestRecordTimeStamp() { /** * The wall clock time the latest record was seen. * - * @return Wall clock time of the lastest record + * @return Wall clock time of the latest record */ public Date getLastDataTimeStamp() { return lastDataTimeStamp; } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/GetRollupJobRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/GetRollupJobRequest.java index 410bc7caa09da..b24be9bb01745 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/GetRollupJobRequest.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/GetRollupJobRequest.java @@ -32,7 +32,7 @@ public class GetRollupJobRequest implements Validatable { private final String jobId; /** - * Create a requets . + * Create a request.
* @param jobId id of the job to return or {@code _all} to return all jobs */ public GetRollupJobRequest(final String jobId) { diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/GetRollupJobResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/GetRollupJobResponse.java index c01914ed2fd94..3718fd0448773 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/GetRollupJobResponse.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/GetRollupJobResponse.java @@ -185,7 +185,7 @@ public final String toString() { public static class RollupIndexerJobStats { private final long numPages; private final long numInputDocuments; - private final long numOuputDocuments; + private final long numOutputDocuments; private final long numInvocations; private long indexTime; private long indexTotal; @@ -194,11 +194,11 @@ public static class RollupIndexerJobStats { private long indexFailures; private long searchFailures; - RollupIndexerJobStats(long numPages, long numInputDocuments, long numOuputDocuments, long numInvocations, + RollupIndexerJobStats(long numPages, long numInputDocuments, long numOutputDocuments, long numInvocations, long indexTime, long indexTotal, long searchTime, long searchTotal, long indexFailures, long searchFailures) { this.numPages = numPages; this.numInputDocuments = numInputDocuments; - this.numOuputDocuments = numOuputDocuments; + this.numOutputDocuments = numOutputDocuments; this.numInvocations = numInvocations; this.indexTime = indexTime; this.indexTotal = indexTotal; @@ -233,7 +233,7 @@ public long getNumInvocations() { * Number of documents written to the result indices. 
*/ public long getOutputDocuments() { - return numOuputDocuments; + return numOutputDocuments; } /** @@ -304,7 +304,7 @@ public boolean equals(Object other) { RollupIndexerJobStats that = (RollupIndexerJobStats) other; return Objects.equals(this.numPages, that.numPages) && Objects.equals(this.numInputDocuments, that.numInputDocuments) - && Objects.equals(this.numOuputDocuments, that.numOuputDocuments) + && Objects.equals(this.numOutputDocuments, that.numOutputDocuments) && Objects.equals(this.numInvocations, that.numInvocations) && Objects.equals(this.indexTime, that.indexTime) && Objects.equals(this.searchTime, that.searchTime) @@ -316,7 +316,7 @@ public boolean equals(Object other) { @Override public int hashCode() { - return Objects.hash(numPages, numInputDocuments, numOuputDocuments, numInvocations, + return Objects.hash(numPages, numInputDocuments, numOutputDocuments, numInvocations, indexTime, searchTime, indexFailures, searchFailures, searchTotal, indexTotal); } @@ -324,7 +324,7 @@ public int hashCode() { public final String toString() { return "{pages=" + numPages + ", input_docs=" + numInputDocuments - + ", output_docs=" + numOuputDocuments + + ", output_docs=" + numOutputDocuments + ", invocations=" + numInvocations + ", index_failures=" + indexFailures + ", search_failures=" + searchFailures diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkProcessorIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkProcessorIT.java index 2b870dbc475ea..1abbb343f7b70 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkProcessorIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkProcessorIT.java @@ -287,7 +287,7 @@ public void testGlobalParametersAndSingleRequest() throws Exception { final CountDownLatch latch = new CountDownLatch(1); BulkProcessorTestListener listener = new BulkProcessorTestListener(latch); - createFieldAddingPipleine("pipeline_id", 
"fieldNameXYZ", "valueXYZ"); + createFieldAddingPipeline("pipeline_id", "fieldNameXYZ", "valueXYZ"); // tag::bulk-processor-mix-parameters try (BulkProcessor processor = initBulkProcessorBuilder(listener) @@ -322,7 +322,7 @@ public void testGlobalParametersAndBulkProcessor() throws Exception { final CountDownLatch latch = new CountDownLatch(1); BulkProcessorTestListener listener = new BulkProcessorTestListener(latch); - createFieldAddingPipleine("pipeline_id", "fieldNameXYZ", "valueXYZ"); + createFieldAddingPipeline("pipeline_id", "fieldNameXYZ", "valueXYZ"); int numDocs = randomIntBetween(10, 10); try (BulkProcessor processor = initBulkProcessorBuilder(listener) diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkRequestWithGlobalParametersIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkRequestWithGlobalParametersIT.java index bb9f78622c821..91fd2ab262064 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkRequestWithGlobalParametersIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkRequestWithGlobalParametersIT.java @@ -45,7 +45,7 @@ public class BulkRequestWithGlobalParametersIT extends ESRestHighLevelClientTest @SuppressWarnings("unchecked") public void testGlobalPipelineOnBulkRequest() throws IOException { - createFieldAddingPipleine("xyz", "fieldNameXYZ", "valueXYZ"); + createFieldAddingPipeline("xyz", "fieldNameXYZ", "valueXYZ"); BulkRequest request = new BulkRequest(); request.add(new IndexRequest("test").id("1") @@ -62,8 +62,8 @@ public void testGlobalPipelineOnBulkRequest() throws IOException { } public void testPipelineOnRequestOverridesGlobalPipeline() throws IOException { - createFieldAddingPipleine("globalId", "fieldXYZ", "valueXYZ"); - createFieldAddingPipleine("perIndexId", "someNewField", "someValue"); + createFieldAddingPipeline("globalId", "fieldXYZ", "valueXYZ"); + createFieldAddingPipeline("perIndexId", "someNewField", "someValue"); 
BulkRequest request = new BulkRequest(); request.pipeline("globalId"); @@ -84,8 +84,8 @@ public void testPipelineOnRequestOverridesGlobalPipeline() throws IOException { @SuppressWarnings("unchecked") public void testMixPipelineOnRequestAndGlobal() throws IOException { - createFieldAddingPipleine("globalId", "fieldXYZ", "valueXYZ"); - createFieldAddingPipleine("perIndexId", "someNewField", "someValue"); + createFieldAddingPipeline("globalId", "fieldXYZ", "valueXYZ"); + createFieldAddingPipeline("perIndexId", "someNewField", "someValue"); // tag::bulk-request-mix-pipeline BulkRequest request = new BulkRequest(); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/CCRIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/CCRIT.java index 90799522372dc..ddf74111b5f4b 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/CCRIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/CCRIT.java @@ -171,9 +171,9 @@ public void testIndexFollowing() throws Exception { // Need to close index prior to unfollowing it: CloseIndexRequest closeIndexRequest = new CloseIndexRequest("follower"); - org.elasticsearch.action.support.master.AcknowledgedResponse closeIndexReponse = + org.elasticsearch.action.support.master.AcknowledgedResponse closeIndexResponse = highLevelClient().indices().close(closeIndexRequest, RequestOptions.DEFAULT); - assertThat(closeIndexReponse.isAcknowledged(), is(true)); + assertThat(closeIndexResponse.isAcknowledged(), is(true)); UnfollowRequest unfollowRequest = new UnfollowRequest("follower"); AcknowledgedResponse unfollowResponse = execute(unfollowRequest, ccrClient::unfollow, ccrClient::unfollowAsync); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ESRestHighLevelClientTestCase.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ESRestHighLevelClientTestCase.java index efe94596b81b6..dabf85327c4a2 100644 --- 
a/client/rest-high-level/src/test/java/org/elasticsearch/client/ESRestHighLevelClientTestCase.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ESRestHighLevelClientTestCase.java @@ -167,7 +167,7 @@ protected static XContentBuilder buildRandomXContentPipeline() throws IOExceptio return buildRandomXContentPipeline(pipelineBuilder); } - protected static void createFieldAddingPipleine(String id, String fieldName, String value) throws IOException { + protected static void createFieldAddingPipeline(String id, String fieldName, String value) throws IOException { XContentBuilder pipeline = jsonBuilder() .startObject() .startArray("processors") diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java index a639a09b3cc53..49606d99dad0e 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java @@ -1196,7 +1196,7 @@ public void testIndexPutSettings() throws IOException { highLevelClient().indices()::putSettings, highLevelClient().indices()::putSettingsAsync)); assertThat(exception.getMessage(), startsWith( "Elasticsearch exception [type=illegal_argument_exception, " - + "reason=final index setting [index.number_of_shards], not updateable")); + + "reason=final index setting [index.number_of_shards], not updatable")); } @SuppressWarnings("unchecked") diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/SecurityRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/SecurityRequestConvertersTests.java index 900f4210a9952..e14aa9f45f133 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/SecurityRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/SecurityRequestConvertersTests.java @@ 
-384,9 +384,9 @@ public void testPutRole() throws IOException { final Map metadata = Collections.singletonMap(randomAlphaOfLengthBetween(4, 7), randomAlphaOfLengthBetween(4, 7)); final String[] runAsPrivilege = randomArray(3, String[]::new, () -> randomAlphaOfLength(5)); final List applicationPrivilegeNames = Arrays.asList(randomArray(1, 3, String[]::new, () -> randomAlphaOfLength(5))); - final List applicationResouceNames = Arrays.asList(randomArray(1, 3, String[]::new, () -> randomAlphaOfLength(5))); + final List applicationResourceNames = Arrays.asList(randomArray(1, 3, String[]::new, () -> randomAlphaOfLength(5))); final ApplicationResourcePrivileges applicationResourcePrivilege = new ApplicationResourcePrivileges( - randomAlphaOfLengthBetween(4, 7), applicationPrivilegeNames, applicationResouceNames); + randomAlphaOfLengthBetween(4, 7), applicationPrivilegeNames, applicationResourceNames); final List indicesName = Arrays.asList(randomArray(1, 3, String[]::new, () -> randomAlphaOfLength(5))); final List indicesPrivilegeName = Arrays.asList(randomArray(1, 3, String[]::new, () -> randomAlphaOfLength(5))); final List indicesPrivilegeGrantedName = Arrays.asList(randomArray(3, String[]::new, () -> randomAlphaOfLength(5))); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java index c8220e9cc0c05..44586aaf7108e 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java @@ -1524,9 +1524,9 @@ public void testGetPrivileges() throws Exception { assertNotNull(response); assertThat(response.getPrivileges().size(), equalTo(3)); - final GetPrivilegesResponse exptectedResponse = + final GetPrivilegesResponse expectedResponse = new 
GetPrivilegesResponse(Arrays.asList(readTestappPrivilege, writeTestappPrivilege, allTestappPrivilege)); - assertThat(response, equalTo(exptectedResponse)); + assertThat(response, equalTo(expectedResponse)); //tag::get-privileges-response Set privileges = response.getPrivileges(); //end::get-privileges-response @@ -1554,10 +1554,10 @@ public void testGetPrivileges() throws Exception { assertNotNull(response); assertThat(response.getPrivileges().size(), equalTo(6)); - final GetPrivilegesResponse exptectedResponse = + final GetPrivilegesResponse expectedResponse = new GetPrivilegesResponse(Arrays.asList(readTestappPrivilege, writeTestappPrivilege, allTestappPrivilege, readTestapp2Privilege, writeTestapp2Privilege, allTestapp2Privilege)); - assertThat(response, equalTo(exptectedResponse)); + assertThat(response, equalTo(expectedResponse)); } { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/stats/JobStatsTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/stats/JobStatsTests.java index 5d00f879140e0..4c1ba6ee07f7c 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/stats/JobStatsTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/stats/JobStatsTests.java @@ -44,10 +44,10 @@ public static JobStats createRandomInstance() { ModelSizeStats modelSizeStats = randomBoolean() ? ModelSizeStatsTests.createRandomized() : null; ForecastStats forecastStats = randomBoolean() ? ForecastStatsTests.createRandom(1, 22) : null; NodeAttributes nodeAttributes = randomBoolean() ? NodeAttributesTests.createRandom() : null; - String assigmentExplanation = randomBoolean() ? randomAlphaOfLength(10) : null; + String assignmentExplanation = randomBoolean() ? randomAlphaOfLength(10) : null; TimeValue openTime = randomBoolean() ? 
TimeValue.timeValueMillis(randomIntBetween(1, 10000)) : null; - return new JobStats(jobId, dataCounts, state, modelSizeStats, forecastStats, nodeAttributes, assigmentExplanation, openTime); + return new JobStats(jobId, dataCounts, state, modelSizeStats, forecastStats, nodeAttributes, assignmentExplanation, openTime); } @Override diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/security/AuthenticateResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/AuthenticateResponseTests.java index f59038af55af7..529cb3ff2993e 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/security/AuthenticateResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/AuthenticateResponseTests.java @@ -47,8 +47,8 @@ public void testFromXContent() throws IOException { } public void testEqualsAndHashCode() { - final AuthenticateResponse reponse = createTestInstance(); - EqualsHashCodeTestUtils.checkEqualsAndHashCode(reponse, this::copy, + final AuthenticateResponse response = createTestInstance(); + EqualsHashCodeTestUtils.checkEqualsAndHashCode(response, this::copy, this::mutate); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/security/GetPrivilegesResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/GetPrivilegesResponseTests.java index 74211892a09e8..8a1cb21a44057 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/security/GetPrivilegesResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/GetPrivilegesResponseTests.java @@ -106,10 +106,10 @@ public void usedDeprecatedField(String usedName, String replacedWith) { new ApplicationPrivilege("testapp2", "write", Arrays.asList("action:login", "data:write/*"), null); final ApplicationPrivilege allTestapp2Privilege = new ApplicationPrivilege("testapp2", "all", Arrays.asList("action:login", 
"data:write/*", "manage:*"), null); - final GetPrivilegesResponse exptectedResponse = + final GetPrivilegesResponse expectedResponse = new GetPrivilegesResponse(Arrays.asList(readTestappPrivilege, writeTestappPrivilege, allTestappPrivilege, readTestapp2Privilege, writeTestapp2Privilege, allTestapp2Privilege)); - assertThat(response, equalTo(exptectedResponse)); + assertThat(response, equalTo(expectedResponse)); } public void testEqualsHashCode() { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/security/GetRoleMappingsResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/GetRoleMappingsResponseTests.java index b612c9ead28a5..01cb3e8788e71 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/security/GetRoleMappingsResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/GetRoleMappingsResponseTests.java @@ -106,10 +106,10 @@ private static GetRoleMappingsResponse mutateTestItem(GetRoleMappingsResponse or break; case 1: final List roleMappingsList2 = new ArrayList<>(); - ExpressionRoleMapping orginialRoleMapping = original.getMappings().get(0); - roleMappingsList2.add(new ExpressionRoleMapping(orginialRoleMapping.getName(), FieldRoleMapperExpression.ofGroups( + ExpressionRoleMapping originalRoleMapping = original.getMappings().get(0); + roleMappingsList2.add(new ExpressionRoleMapping(originalRoleMapping.getName(), FieldRoleMapperExpression.ofGroups( "cn=ipausers,cn=groups,cn=accounts,dc=ipademo,dc=local"), - orginialRoleMapping.getRoles(), orginialRoleMapping.getMetadata(), !orginialRoleMapping.isEnabled())); + originalRoleMapping.getRoles(), originalRoleMapping.getMetadata(), !originalRoleMapping.isEnabled())); mutated = new GetRoleMappingsResponse(roleMappingsList2); break; } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/security/GetSslCertificatesResponseTests.java 
b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/GetSslCertificatesResponseTests.java index fedcee5364988..597086a1e7445 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/security/GetSslCertificatesResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/GetSslCertificatesResponseTests.java @@ -43,8 +43,8 @@ public void testFromXContent() throws IOException { .test(); } public void testEqualsAndHashCode() { - final GetSslCertificatesResponse reponse = createTestInstance(); - EqualsHashCodeTestUtils.checkEqualsAndHashCode(reponse, this::copy, + final GetSslCertificatesResponse response = createTestInstance(); + EqualsHashCodeTestUtils.checkEqualsAndHashCode(response, this::copy, this::mutate); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/security/user/privileges/ApplicationPrivilegeTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/user/privileges/ApplicationPrivilegeTests.java index b720187673023..13d46d1ec3788 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/security/user/privileges/ApplicationPrivilegeTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/user/privileges/ApplicationPrivilegeTests.java @@ -86,9 +86,9 @@ public void testEmptyApplicationName() { public void testEmptyPrivilegeName() { final Map metadata = new HashMap<>(); metadata.put("description", "Read access to myapp"); - final String privilegenName = randomBoolean() ? null : ""; + final String privilegeName = randomBoolean() ? 
null : ""; final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> - new ApplicationPrivilege("myapp", privilegenName, Arrays.asList("data:read/*", "action:login"), metadata)); + new ApplicationPrivilege("myapp", privilegeName, Arrays.asList("data:read/*", "action:login"), metadata)); assertThat(e.getMessage(), equalTo("privilege name must be provided")); } diff --git a/client/rest/src/main/java/org/elasticsearch/client/Node.java b/client/rest/src/main/java/org/elasticsearch/client/Node.java index f180b52927545..addc3db9d46f3 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/Node.java +++ b/client/rest/src/main/java/org/elasticsearch/client/Node.java @@ -186,19 +186,19 @@ public Roles(boolean masterEligible, boolean data, boolean ingest) { } /** - * Teturns whether or not the node could be elected master. + * Returns whether or not the node could be elected master. */ public boolean isMasterEligible() { return masterEligible; } /** - * Teturns whether or not the node stores data. + * Returns whether or not the node stores data. */ public boolean isData() { return data; } /** - * Teturns whether or not the node runs ingest pipelines. + * Returns whether or not the node runs ingest pipelines. */ public boolean isIngest() { return ingest; diff --git a/client/rest/src/main/java/org/elasticsearch/client/RequestOptions.java b/client/rest/src/main/java/org/elasticsearch/client/RequestOptions.java index f06c375d4302d..6cc4ce4955ac3 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/RequestOptions.java +++ b/client/rest/src/main/java/org/elasticsearch/client/RequestOptions.java @@ -76,7 +76,7 @@ public HttpAsyncResponseConsumerFactory getHttpAsyncResponseConsumerFactory() { /** * How this request should handle warnings. If null (the default) then - * this request will default to the behavior dictacted by + * this request will default to the behavior dictated by * {@link RestClientBuilder#setStrictDeprecationMode}. *

* This can be set to {@link WarningsHandler#PERMISSIVE} if the client @@ -189,7 +189,7 @@ public void setHttpAsyncResponseConsumerFactory(HttpAsyncResponseConsumerFactory /** * How this request should handle warnings. If null (the default) then - * this request will default to the behavior dictacted by + * this request will default to the behavior dictated by * {@link RestClientBuilder#setStrictDeprecationMode}. *

* This can be set to {@link WarningsHandler#PERMISSIVE} if the client diff --git a/dev-tools/prepare_release_update_documentation.py b/dev-tools/prepare_release_update_documentation.py index c7eae4eeb3245..707944a46026a 100644 --- a/dev-tools/prepare_release_update_documentation.py +++ b/dev-tools/prepare_release_update_documentation.py @@ -82,9 +82,9 @@ def process_file(file_path, line_callback): return False # Checks the pom.xml for the release version. -# This method fails if the pom file has no SNAPSHOT version set ie. +# This method fails if the pom file has no SNAPSHOT version set i.e. # if the version is already on a release version we fail. -# Returns the next version string ie. 0.90.7 +# Returns the next version string i.e. 0.90.7 def find_release_version(): with open('pom.xml', encoding='utf-8') as file: for line in file: diff --git a/dev-tools/smoke_test_rc.py b/dev-tools/smoke_test_rc.py index f78e2c2adf414..a212de91d17d0 100644 --- a/dev-tools/smoke_test_rc.py +++ b/dev-tools/smoke_test_rc.py @@ -198,7 +198,7 @@ def smoke_test_release(release, files, hash, plugins): run('%s; %s useradd es_admin -r superuser -p foobar' % (java_exe(), es_shield_path)) else: headers = {} - print(' Starting elasticsearch deamon from [%s]' % es_dir) + print(' Starting elasticsearch daemon from [%s]' % es_dir) try: run('%s; %s -Enode.name=smoke_tester -Ecluster.name=prepare_release -Erepositories.url.allowed_urls=http://snapshot.test* %s -Epidfile=%s -Enode.portsfile=true' % (java_exe(), es_run_path, '-d', os.path.join(es_dir, 'es-smoke.pid'))) diff --git a/distribution/packages/build.gradle b/distribution/packages/build.gradle index cf858702784d8..18aa5930a9cd2 100644 --- a/distribution/packages/build.gradle +++ b/distribution/packages/build.gradle @@ -240,7 +240,7 @@ Closure commonPackageConfig(String type, boolean oss) { apply plugin: 'nebula.ospackage-base' -// this is package indepdendent configuration +// this is package independent configuration ospackage { maintainer 
'Elasticsearch Team ' summary ''' diff --git a/distribution/src/bin/elasticsearch-service.bat b/distribution/src/bin/elasticsearch-service.bat index f566c34c958ae..843715cbbf270 100644 --- a/distribution/src/bin/elasticsearch-service.bat +++ b/distribution/src/bin/elasticsearch-service.bat @@ -32,7 +32,7 @@ if /i %SERVICE_CMD% == install goto doInstall if /i %SERVICE_CMD% == remove goto doRemove if /i %SERVICE_CMD% == start goto doStart if /i %SERVICE_CMD% == stop goto doStop -if /i %SERVICE_CMD% == manager goto doManagment +if /i %SERVICE_CMD% == manager goto doManagement echo Unknown option "%SERVICE_CMD%" exit /B 1 @@ -61,7 +61,7 @@ goto:eof echo The service '%SERVICE_ID%' has been stopped goto:eof -:doManagment +:doManagement set EXECUTABLE_MGR=%ES_HOME%\bin\elasticsearch-service-mgr "%EXECUTABLE_MGR%" //ES//%SERVICE_ID% if not errorlevel 1 goto managed diff --git a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java index fc2da33c3a688..0f44b3c0b5f7a 100644 --- a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java +++ b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java @@ -531,7 +531,7 @@ private Path downloadAndValidate( * ".asc" to the URL. It is expected that the plugin is signed with the Elastic signing key with ID D27D666CD88E42B4. * * @param zip the path to the downloaded plugin ZIP - * @param urlString the URL source of the downloade plugin ZIP + * @param urlString the URL source of the downloaded plugin ZIP * @throws IOException if an I/O exception occurs reading from various input streams * @throws PGPException if the PGP implementation throws an internal exception during verification */ @@ -603,7 +603,7 @@ InputStream getPublicKey() { /** * Creates a URL and opens a connection. 
* - * If the URL returns a 404, {@code null} is returned, otherwise the open URL opject is returned. + * If the URL returns a 404, {@code null} is returned, otherwise the open URL object is returned. */ // pkg private for tests URL openUrl(String urlString) throws IOException { diff --git a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java index 248eb364ebc64..9e932f190a03c 100644 --- a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java +++ b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java @@ -963,7 +963,7 @@ public void testInstallReleaseBuildOfPluginOnSnapshotBuild() { "https://snapshots.elastic.co/%s-abc123/downloads/elasticsearch-plugins/analysis-icu/analysis-icu-%s.zip", Version.CURRENT, Build.CURRENT.getQualifiedVersion()); - // attemping to install a release build of a plugin (no staging ID) on a snapshot build should throw a user exception + // attempting to install a release build of a plugin (no staging ID) on a snapshot build should throw a user exception final UserException e = expectThrows(UserException.class, () -> assertInstallPluginFromUrl("analysis-icu", "analysis-icu", url, null, true)); assertThat(e.exitCode, equalTo(ExitCodes.CONFIG)); diff --git a/docs/build.gradle b/docs/build.gradle index 630226639337a..035667ee84309 100644 --- a/docs/build.gradle +++ b/docs/build.gradle @@ -287,8 +287,8 @@ buildRestTests.setups['stackoverflow'] = ''' body: |''' // Make Kibana strongly connected to elasticsearch and logstash -// Make Kibana rarer (and therefore higher-ranking) than Javascript -// Make Javascript strongly connected to jquery and angular +// Make Kibana rarer (and therefore higher-ranking) than JavaScript +// Make JavaScript strongly connected to jquery and angular // Make Cabana strongly 
connected to elasticsearch but only as a result of a single author for (int i = 0; i < 150; i++) { diff --git a/docs/java-api/docs/update-by-query.asciidoc b/docs/java-api/docs/update-by-query.asciidoc index d4fe7c1c0419f..ef58d3754276e 100644 --- a/docs/java-api/docs/update-by-query.asciidoc +++ b/docs/java-api/docs/update-by-query.asciidoc @@ -72,7 +72,7 @@ operation that executes: `noop`:: Set `ctx.op = "noop"` if your script doesn't make any -changes. The `updateByQuery` operaton then omits that document from the updates. +changes. The `updateByQuery` operation then omits that document from the updates. This behavior increments the `noop` counter in the response body. `delete`:: diff --git a/docs/java-rest/high-level/document/multi-get.asciidoc b/docs/java-rest/high-level/document/multi-get.asciidoc index f63bf898922f4..ca26139d23057 100644 --- a/docs/java-rest/high-level/document/multi-get.asciidoc +++ b/docs/java-rest/high-level/document/multi-get.asciidoc @@ -128,7 +128,7 @@ include-tagged::{doc-tests-file}[{api}-conflict] -------------------------------------------------- <1> `getResponse` is null. <2> `getFailure` isn't and contains an `Exception`. -<3> That `Exception` is actuall and `ElasticsearchException` +<3> That `Exception` is actually an `ElasticsearchException` <4> and it has a status of `CONFLICT`. It'd have been an HTTP 409 if this wasn't a multi get. 
<5> `getMessage` explains the actual cause, ` diff --git a/docs/java-rest/high-level/document/update-by-query.asciidoc b/docs/java-rest/high-level/document/update-by-query.asciidoc index fdf50148df4c8..be1692c4e9f6a 100644 --- a/docs/java-rest/high-level/document/update-by-query.asciidoc +++ b/docs/java-rest/high-level/document/update-by-query.asciidoc @@ -125,7 +125,7 @@ include::../execution.asciidoc[] [id="{upid}-{api}-response"] ==== Update By Query Response -The returned +{resposne}+ contains information about the executed operations and +The returned +{response}+ contains information about the executed operations and allows to iterate over each result as follows: ["source","java",subs="attributes,callouts,macros"] diff --git a/docs/java-rest/high-level/getting-started.asciidoc b/docs/java-rest/high-level/getting-started.asciidoc index 92a61febb6864..97a8a0934192d 100644 --- a/docs/java-rest/high-level/getting-started.asciidoc +++ b/docs/java-rest/high-level/getting-started.asciidoc @@ -144,7 +144,7 @@ include-tagged::{doc-tests}/MiscellaneousDocumentationIT.java[rest-high-level-cl In the rest of this documentation about the Java High Level Client, the `RestHighLevelClient` instance will be referenced as `client`. 
-[[java-rest-hight-getting-started-request-options]] +[[java-rest-high-getting-started-request-options]] === RequestOptions All APIs in the `RestHighLevelClient` accept a `RequestOptions` which you can diff --git a/docs/java-rest/high-level/indices/unfreeze_index.asciidoc b/docs/java-rest/high-level/indices/unfreeze_index.asciidoc index d2c477b33316d..27e98581f0c72 100644 --- a/docs/java-rest/high-level/indices/unfreeze_index.asciidoc +++ b/docs/java-rest/high-level/indices/unfreeze_index.asciidoc @@ -15,7 +15,7 @@ An +{request}+ requires an `index` argument: -------------------------------------------------- include-tagged::{doc-tests-file}[{api}-request] -------------------------------------------------- -<1> The index to unreeze +<1> The index to unfreeze ==== Optional arguments The following arguments can optionally be provided: diff --git a/docs/java-rest/high-level/migration/get-assistance.asciidoc b/docs/java-rest/high-level/migration/get-assistance.asciidoc index 20f857eb1fb41..723eb7d09053d 100644 --- a/docs/java-rest/high-level/migration/get-assistance.asciidoc +++ b/docs/java-rest/high-level/migration/get-assistance.asciidoc @@ -1,7 +1,7 @@ [[java-rest-high-migration-get-assistance]] === Migration Get Assistance -[[java-rest-high-migraton-get-assistance-request]] +[[java-rest-high-migration-get-assistance-request]] ==== Index Upgrade Info Request An `IndexUpgradeInfoRequest` does not require any argument: diff --git a/docs/java-rest/high-level/migration/upgrade.asciidoc b/docs/java-rest/high-level/migration/upgrade.asciidoc index b5bd33d693601..feabfa4ee48e5 100644 --- a/docs/java-rest/high-level/migration/upgrade.asciidoc +++ b/docs/java-rest/high-level/migration/upgrade.asciidoc @@ -8,7 +8,7 @@ [[java-rest-high-migration-upgrade]] === Migration Upgrade -[[java-rest-high-migraton-upgrade-request]] +[[java-rest-high-migration-upgrade-request]] ==== Index Upgrade Request An +{request}+ requires an index argument.
Only one index at the time should be upgraded: @@ -32,7 +32,7 @@ include-tagged::{doc-tests-file}[{api}-execute] The returned +{response}+ contains information about the executed operation -[[java-rest-high-migraton-async-upgrade-request]] +[[java-rest-high-migration-async-upgrade-request]] ==== Asynchronous Execution The asynchronous execution of an upgrade request requires both the +{request}+ diff --git a/docs/java-rest/high-level/search/rank-eval.asciidoc b/docs/java-rest/high-level/search/rank-eval.asciidoc index 6db0dadd00ed7..195e1f92f3bfb 100644 --- a/docs/java-rest/high-level/search/rank-eval.asciidoc +++ b/docs/java-rest/high-level/search/rank-eval.asciidoc @@ -82,7 +82,7 @@ include-tagged::{doc-tests}/SearchDocumentationIT.java[rank-eval-response] <2> Partial results that are keyed by their query id <3> The metric score for each partial result <4> Rated search hits contain a fully fledged `SearchHit` -<5> Rated search hits also contain an `Optional` rating that +<5> Rated search hits also contain an `Optional` rating that is not present if the document did not get a rating in the request <6> Metric details are named after the metric used in the request <7> After casting to the metric used in the request, the diff --git a/docs/java-rest/high-level/security/get-privileges.asciidoc b/docs/java-rest/high-level/security/get-privileges.asciidoc index 06ae51e669081..6eee8bbc3c1f5 100644 --- a/docs/java-rest/high-level/security/get-privileges.asciidoc +++ b/docs/java-rest/high-level/security/get-privileges.asciidoc @@ -2,7 +2,7 @@ -- :api: get-privileges :request: GetPrivilegesRequest -:respnse: GetPrivilegesResponse +:response: GetPrivilegesResponse -- [id="{upid}-{api}"] diff --git a/docs/java-rest/high-level/security/get-roles.asciidoc b/docs/java-rest/high-level/security/get-roles.asciidoc index 9ecf36353c3e8..777349222992e 100644 --- a/docs/java-rest/high-level/security/get-roles.asciidoc +++ b/docs/java-rest/high-level/security/get-roles.asciidoc @@ -2,7 
+2,7 @@ -- :api: get-roles :request: GetRolesRequest -:respnse: GetRolesResponse +:response: GetRolesResponse -- [id="{upid}-{api}"] diff --git a/docs/java-rest/high-level/security/get-users.asciidoc b/docs/java-rest/high-level/security/get-users.asciidoc index e9e4a0d94911b..1d41bd76166b4 100644 --- a/docs/java-rest/high-level/security/get-users.asciidoc +++ b/docs/java-rest/high-level/security/get-users.asciidoc @@ -2,7 +2,7 @@ -- :api: get-users :request: GetUsersRequest -:respnse: GetUsersResponse +:response: GetUsersResponse -- [id="{upid}-{api}"] diff --git a/docs/java-rest/high-level/watcher/deactivate-watch.asciidoc b/docs/java-rest/high-level/watcher/deactivate-watch.asciidoc index 673423b69b983..ca2178e5c05e2 100644 --- a/docs/java-rest/high-level/watcher/deactivate-watch.asciidoc +++ b/docs/java-rest/high-level/watcher/deactivate-watch.asciidoc @@ -1,6 +1,6 @@ -- :api: deactivate-watch -:request: deactivateWatchRequet +:request: deactivateWatchRequest :response: deactivateWatchResponse :doc-tests-file: {doc-tests}/WatcherDocumentationIT.java -- diff --git a/docs/java-rest/low-level/usage.asciidoc b/docs/java-rest/low-level/usage.asciidoc index 38104215720d7..3747314b6ecd3 100644 --- a/docs/java-rest/low-level/usage.asciidoc +++ b/docs/java-rest/low-level/usage.asciidoc @@ -328,7 +328,7 @@ include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-options-cus The client is quite happy to execute many actions in parallel. The following example indexes many documents in parallel. In a real world scenario you'd -probably want to use the `_bulk` API instead, but the example is illustative. +probably want to use the `_bulk` API instead, but the example is illustrative. 
["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- diff --git a/docs/painless/painless-execute-script.asciidoc b/docs/painless/painless-execute-script.asciidoc index 30320def79b2d..200dd3f63d56f 100644 --- a/docs/painless/painless-execute-script.asciidoc +++ b/docs/painless/painless-execute-script.asciidoc @@ -65,7 +65,7 @@ are available in the script being tested. The following parameters may be specified in `context_setup` for a filter context: document:: Contains the document that will be temporarily indexed in-memory and is accessible from the script. -index:: The name of an index containing a mapping that is compatable with the document being indexed. +index:: The name of an index containing a mapping that is compatible with the document being indexed. *Example* @@ -122,7 +122,7 @@ The `score` context executes scripts as if they were executed inside a `script_s The following parameters may be specified in `context_setup` for a score context: document:: Contains the document that will be temporarily indexed in-memory and is accessible from the script. -index:: The name of an index containing a mapping that is compatable with the document being indexed. +index:: The name of an index containing a mapping that is compatible with the document being indexed. query:: If `_score` is used in the script then a query can specified that will be used to compute a score. 
*Example* diff --git a/docs/perl/index.asciidoc b/docs/perl/index.asciidoc index fc487c735ebd6..d009b3d0460ac 100644 --- a/docs/perl/index.asciidoc +++ b/docs/perl/index.asciidoc @@ -28,7 +28,7 @@ This client provides: * Logging support via Log::Any -* Compatibility with the official clients for Python, Ruby, PHP and Javascript +* Compatibility with the official clients for Python, Ruby, PHP and JavaScript * Easy extensibility diff --git a/docs/plugins/analysis-phonetic.asciidoc b/docs/plugins/analysis-phonetic.asciidoc index 9d9df4827fd4e..e22f819e1eb3e 100644 --- a/docs/plugins/analysis-phonetic.asciidoc +++ b/docs/plugins/analysis-phonetic.asciidoc @@ -93,6 +93,6 @@ supported: `languageset`:: An array of languages to check. If not specified, then the language will - be guessed. Accepts: `any`, `comomon`, `cyrillic`, `english`, `french`, + be guessed. Accepts: `any`, `common`, `cyrillic`, `english`, `french`, `german`, `hebrew`, `hungarian`, `polish`, `romanian`, `russian`, `spanish`. diff --git a/docs/plugins/index.asciidoc b/docs/plugins/index.asciidoc index 77e97396b0e5e..4d51ff147d7a6 100644 --- a/docs/plugins/index.asciidoc +++ b/docs/plugins/index.asciidoc @@ -33,7 +33,7 @@ Issues and bug reports can usually be reported on the community plugin's web sit For advice on writing your own plugin, see <>. -IMPORTANT: Site plugins -- plugins containing HTML, CSS and Javascript -- are +IMPORTANT: Site plugins -- plugins containing HTML, CSS and JavaScript -- are no longer supported. include::plugin-script.asciidoc[] diff --git a/docs/plugins/integrations.asciidoc b/docs/plugins/integrations.asciidoc index 912d9df2f4bd3..6d543408f679f 100644 --- a/docs/plugins/integrations.asciidoc +++ b/docs/plugins/integrations.asciidoc @@ -191,7 +191,7 @@ releases 2.0 and later do not support rivers. 
==== Supported by the community: * https://github.com/kodcu/pes[Pes]: - A pluggable elastic Javascript query DSL builder for Elasticsearch + A pluggable elastic JavaScript query DSL builder for Elasticsearch * https://www.wireshark.org/[Wireshark]: Protocol dissection for Zen discovery, HTTP and the binary protocol diff --git a/docs/plugins/repository-azure.asciidoc b/docs/plugins/repository-azure.asciidoc index 13c6a7b62ccbd..df09b28093c80 100644 --- a/docs/plugins/repository-azure.asciidoc +++ b/docs/plugins/repository-azure.asciidoc @@ -139,7 +139,7 @@ Some examples, using scripts: [source,js] ---- -# The simpliest one +# The simplest one PUT _snapshot/my_backup1 { "type": "azure" diff --git a/docs/plugins/repository-hdfs.asciidoc b/docs/plugins/repository-hdfs.asciidoc index 1b975ef761d4a..005cc30895552 100644 --- a/docs/plugins/repository-hdfs.asciidoc +++ b/docs/plugins/repository-hdfs.asciidoc @@ -78,7 +78,7 @@ The following settings are supported: [[repository-hdfs-availability]] [float] -===== A Note on HDFS Availablility +===== A Note on HDFS Availability When you initialize a repository, its settings are persisted in the cluster state. When a node comes online, it will attempt to initialize all repositories for which it has settings. If your cluster has an HDFS repository configured, then all nodes in the cluster must be able to reach HDFS when starting. 
If not, then the node will fail to initialize the diff --git a/docs/reference/aggregations/pipeline/movavg-aggregation.asciidoc b/docs/reference/aggregations/pipeline/movavg-aggregation.asciidoc index 39a8255c90705..06641391ced32 100644 --- a/docs/reference/aggregations/pipeline/movavg-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/movavg-aggregation.asciidoc @@ -263,7 +263,7 @@ image::images/pipeline_movavg/linear_100window.png[] The `ewma` model (aka "single-exponential") is similar to the `linear` model, except older data-points become exponentially less important, rather than linearly less important. The speed at which the importance decays can be controlled with an `alpha` setting. Small values make the weight decay slowly, which provides greater smoothing and takes into account a larger -portion of the window. Larger valuers make the weight decay quickly, which reduces the impact of older values on the +portion of the window. Larger values make the weight decay quickly, which reduces the impact of older values on the moving average. This tends to make the moving average track the data more closely but with less smoothing. The default value of `alpha` is `0.3`, and the setting accepts any float from 0-1 inclusive. diff --git a/docs/reference/aggregations/pipeline/movfn-aggregation.asciidoc b/docs/reference/aggregations/pipeline/movfn-aggregation.asciidoc index febd9bc8a55d2..5745527bddd6f 100644 --- a/docs/reference/aggregations/pipeline/movfn-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/movfn-aggregation.asciidoc @@ -449,7 +449,7 @@ The `ewma` function (aka "single-exponential") is similar to the `linearMovAvg` except older data-points become exponentially less important, rather than linearly less important. The speed at which the importance decays can be controlled with an `alpha` setting. Small values make the weight decay slowly, which provides greater smoothing and takes into account a larger -portion of the window. 
Larger valuers make the weight decay quickly, which reduces the impact of older values on the +portion of the window. Larger values make the weight decay quickly, which reduces the impact of older values on the moving average. This tends to make the moving average track the data more closely but with less smoothing. `null` and `NaN` values are ignored; the average is only calculated over the real values. If the window is empty, or all values are diff --git a/docs/reference/analysis/analyzers/fingerprint-analyzer.asciidoc b/docs/reference/analysis/analyzers/fingerprint-analyzer.asciidoc index cc873a4fe89ff..cc82d2eb8179f 100644 --- a/docs/reference/analysis/analyzers/fingerprint-analyzer.asciidoc +++ b/docs/reference/analysis/analyzers/fingerprint-analyzer.asciidoc @@ -58,7 +58,7 @@ The `fingerprint` analyzer accepts the following parameters: [horizontal] `separator`:: - The character to use to concate the terms. Defaults to a space. + The character to use to concatenate the terms. Defaults to a space. `max_output_size`:: diff --git a/docs/reference/analysis/tokenizers/chargroup-tokenizer.asciidoc b/docs/reference/analysis/tokenizers/chargroup-tokenizer.asciidoc index e6bf79b0e961f..2c18e94878fb2 100644 --- a/docs/reference/analysis/tokenizers/chargroup-tokenizer.asciidoc +++ b/docs/reference/analysis/tokenizers/chargroup-tokenizer.asciidoc @@ -15,7 +15,7 @@ The `char_group` tokenizer accepts one parameter: `tokenize_on_chars`:: A list containing a list of characters to tokenize the string on. Whenever a character from this list is encountered, a new token is started. This accepts either single - characters like eg. `-`, or character groups: `whitespace`, `letter`, `digit`, + characters like e.g. `-`, or character groups: `whitespace`, `letter`, `digit`, `punctuation`, `symbol`. 
diff --git a/docs/reference/cat/nodeattrs.asciidoc b/docs/reference/cat/nodeattrs.asciidoc index 6c474c2117943..2b893a4c79b11 100644 --- a/docs/reference/cat/nodeattrs.asciidoc +++ b/docs/reference/cat/nodeattrs.asciidoc @@ -10,7 +10,7 @@ GET /_cat/nodeattrs?v -------------------------------------------------- // CONSOLE // TEST[s/\?v/\?v&s=node,attr/] -// Sort the resulting attributes so we can assert on them more easilly +// Sort the resulting attributes so we can assert on them more easily Could look like: @@ -55,7 +55,7 @@ GET /_cat/nodeattrs?v&h=name,pid,attr,value -------------------------------------------------- // CONSOLE // TEST[s/,value/,value&s=node,attr/] -// Sort the resulting attributes so we can assert on them more easilly +// Sort the resulting attributes so we can assert on them more easily Might look like: diff --git a/docs/reference/cat/templates.asciidoc b/docs/reference/cat/templates.asciidoc index 076e84b72b5d3..304459ba96edb 100644 --- a/docs/reference/cat/templates.asciidoc +++ b/docs/reference/cat/templates.asciidoc @@ -12,7 +12,7 @@ GET /_cat/templates?v&s=name // TEST[s/^/PUT _template\/template0\n{"index_patterns": "te*", "order": 0}\n/] // TEST[s/^/PUT _template\/template1\n{"index_patterns": "tea*", "order": 1}\n/] // TEST[s/^/PUT _template\/template2\n{"index_patterns": "teak*", "order": 2, "version": 7}\n/] -// The substitions do two things: +// The substitutions do two things: // 1. Filter the response to just templates matching the te* pattern // so that we only get the templates we expect regardless of which // templates exist. 
If xpack is installed there will be unexpected diff --git a/docs/reference/ccr/apis/follow/get-follow-stats.asciidoc b/docs/reference/ccr/apis/follow/get-follow-stats.asciidoc index 755bf63f0183f..766f502ff93a3 100644 --- a/docs/reference/ccr/apis/follow/get-follow-stats.asciidoc +++ b/docs/reference/ccr/apis/follow/get-follow-stats.asciidoc @@ -47,7 +47,7 @@ GET //_ccr/stats // CONSOLE // TEST[s//follower_index/] -==== Path Parmeters +==== Path Parameters `index` :: (string) a comma-delimited list of index patterns diff --git a/docs/reference/ccr/apis/follow/post-unfollow.asciidoc b/docs/reference/ccr/apis/follow/post-unfollow.asciidoc index 1bc2c3ef28838..6507c04ac5026 100644 --- a/docs/reference/ccr/apis/follow/post-unfollow.asciidoc +++ b/docs/reference/ccr/apis/follow/post-unfollow.asciidoc @@ -50,7 +50,7 @@ POST //_ccr/unfollow // CONSOLE // TEST[s//follower_index/] -==== Path Parmeters +==== Path Parameters `follower_index` (required):: (string) the name of the follower index diff --git a/docs/reference/cluster/nodes-reload-secure-settings.asciidoc b/docs/reference/cluster/nodes-reload-secure-settings.asciidoc index f02ac8e46576b..68bca72be248c 100644 --- a/docs/reference/cluster/nodes-reload-secure-settings.asciidoc +++ b/docs/reference/cluster/nodes-reload-secure-settings.asciidoc @@ -3,10 +3,10 @@ The cluster nodes reload secure settings API is used to re-read the local node's encrypted keystore. Specifically, it will prompt the keystore -decryption and reading accross the cluster. The keystore's plain content is +decryption and reading across the cluster. The keystore's plain content is used to reinitialize all compatible plugins. A compatible plugin can be -reinitilized without restarting the node. The operation is -complete when all compatible plugins have finished reinitilizing. Subsequently, +reinitialized without restarting the node. The operation is +complete when all compatible plugins have finished reinitializing. 
Subsequently, the keystore is closed and any changes to it will not be reflected on the node. [source,js] diff --git a/docs/reference/cluster/nodes-stats.asciidoc b/docs/reference/cluster/nodes-stats.asciidoc index eb3abb19d1adf..4bd3c2c9647a5 100644 --- a/docs/reference/cluster/nodes-stats.asciidoc +++ b/docs/reference/cluster/nodes-stats.asciidoc @@ -279,7 +279,7 @@ the operating system: `os.cgroup.memory.limit_in_bytes`. NOTE: For the cgroup stats to be visible, cgroups must be compiled into -the kernal, the `cpu` and `cpuacct` cgroup subsystems must be +the kernel, the `cpu` and `cpuacct` cgroup subsystems must be configured and stats must be readable from `/sys/fs/cgroup/cpu` and `/sys/fs/cgroup/cpuacct`. diff --git a/docs/reference/ilm/error-handling.asciidoc b/docs/reference/ilm/error-handling.asciidoc index 639c2fbaddd6d..4d6a8d963afb3 100644 --- a/docs/reference/ilm/error-handling.asciidoc +++ b/docs/reference/ilm/error-handling.asciidoc @@ -119,7 +119,7 @@ Which returns the following information: <9> the definition of the phase (in this case, the "warm" phase) that the index is currently on The index here has been moved to the error step because the shrink definition in -the policy is using an incorrect number of shards. So rectifing that in the +the policy is using an incorrect number of shards. So rectifying that in the policy entails updating the existing policy to use one instead of four for the targeted number of shards. diff --git a/docs/reference/ilm/using-policies-rollover.asciidoc b/docs/reference/ilm/using-policies-rollover.asciidoc index 3af6e125fcd99..8e4a23a08af9e 100644 --- a/docs/reference/ilm/using-policies-rollover.asciidoc +++ b/docs/reference/ilm/using-policies-rollover.asciidoc @@ -18,7 +18,7 @@ resource usage. You control when the rollover action is triggered by specifying one or more rollover parameters. The rollover is performed once any of the criteria are met. 
Because the criteria are checked periodically, the index might grow -slightly beyond the specified threshold. To control how often the critera are +slightly beyond the specified threshold. To control how often the criteria are checked, specify the `indices.lifecycle.poll_interval` cluster setting. IMPORTANT: New indices created via rollover will not automatically inherit the diff --git a/docs/reference/index-modules/allocation/prioritization.asciidoc b/docs/reference/index-modules/allocation/prioritization.asciidoc index 92051cc4dbc57..6693e6adb755e 100644 --- a/docs/reference/index-modules/allocation/prioritization.asciidoc +++ b/docs/reference/index-modules/allocation/prioritization.asciidoc @@ -10,7 +10,7 @@ Indices are sorted into priority order as follows: This means that, by default, newer indices will be recovered before older indices. -Use the per-index dynamically updateable `index.priority` setting to customise +Use the per-index dynamically updatable `index.priority` setting to customise the index prioritization order. For instance: [source,js] diff --git a/docs/reference/indices/flush.asciidoc b/docs/reference/indices/flush.asciidoc index 37d48eec2a215..c6bf60182fd76 100644 --- a/docs/reference/indices/flush.asciidoc +++ b/docs/reference/indices/flush.asciidoc @@ -27,7 +27,7 @@ flush can be executed if another flush operation is already executing. The default is `false` and will cause an exception to be thrown on the shard level if another flush operation is already running. -`force`:: Whether a flush should be forced even if it is not necessarily needed ie. +`force`:: Whether a flush should be forced even if it is not necessarily needed i.e. if no changes will be committed to the index. This is useful if transaction log IDs should be incremented even if no uncommitted changes are present. 
(This setting can be considered as internal) diff --git a/docs/reference/indices/stats.asciidoc b/docs/reference/indices/stats.asciidoc index a95b1c81ae189..9ccd78f8f5df2 100644 --- a/docs/reference/indices/stats.asciidoc +++ b/docs/reference/indices/stats.asciidoc @@ -88,7 +88,7 @@ GET /_stats/search?groups=group1,group2 The stats returned are aggregated on the index level, with `primaries` and `total` aggregations, where `primaries` are the values for only the -primary shards, and `total` are the cumulated values for both primary and replica shards. +primary shards, and `total` are the accumulated values for both primary and replica shards. In order to get back shard level stats, set the `level` parameter to `shards`. diff --git a/docs/reference/ingest/ingest-node.asciidoc b/docs/reference/ingest/ingest-node.asciidoc index 7f6bbb5302af9..578bf35cb2446 100644 --- a/docs/reference/ingest/ingest-node.asciidoc +++ b/docs/reference/ingest/ingest-node.asciidoc @@ -428,7 +428,7 @@ For example: `'Guest'.equalsIgnoreCase(ctx.network?.name)` is null safe because since `ctx.network?.name` can return null. Some situations require an explicit null check. In the following example there -is not null safe alternative, so an explict null check is needed. +is no null safe alternative, so an explicit null check is needed. [source,js] -------------------------------------------------- diff --git a/docs/reference/ingest/processors/dissect.asciidoc b/docs/reference/ingest/processors/dissect.asciidoc index 0bcd1a27c7437..0c04e7ed07396 100644 --- a/docs/reference/ingest/processors/dissect.asciidoc +++ b/docs/reference/ingest/processors/dissect.asciidoc @@ -47,7 +47,7 @@ Later dissect matches the `[` and then `]` and then assigns `@timestamp` to ever Paying special attention the parts of the string to discard will help build successful dissect patterns. Successful matches require all keys in a pattern to have a value.
If any of the `%{keyname}` defined in the pattern do -not have a value, then an exception is thrown and may be handled by the <> directive. +not have a value, then an exception is thrown and may be handled by the <> directive. An empty key `%{}` or a <> can be used to match values, but exclude the value from the final document. All matched values are represented as string data types. The <> may be used to convert to expected data type. diff --git a/docs/reference/ingest/processors/dot-expand.asciidoc b/docs/reference/ingest/processors/dot-expand.asciidoc index b3322c96a25f8..1e8eb7da6cf03 100644 --- a/docs/reference/ingest/processors/dot-expand.asciidoc +++ b/docs/reference/ingest/processors/dot-expand.asciidoc @@ -5,7 +5,7 @@ Expands a field with dots into an object field. This processor allows fields with dots in the name to be accessible by other processors in the pipeline. Otherwise these <> can't be accessed by any processor. -[[dot-expender-options]] +[[dot-expander-options]] .Dot Expand Options [options="header"] |====== diff --git a/docs/reference/ingest/processors/geoip.asciidoc b/docs/reference/ingest/processors/geoip.asciidoc index fabd58a7096ff..7b80422c3aa2f 100644 --- a/docs/reference/ingest/processors/geoip.asciidoc +++ b/docs/reference/ingest/processors/geoip.asciidoc @@ -189,7 +189,7 @@ Which returns: ===== Recognizing Location as a Geopoint Although this processor enriches your document with a `location` field containing the estimated latitude and longitude of the IP address, this field will not be -indexed as a {ref}/geo-point.html[`geo_point`] type in Elasticsearch without explicitely defining it +indexed as a {ref}/geo-point.html[`geo_point`] type in Elasticsearch without explicitly defining it as such in the mapping. 
You can use the following mapping for the example index above: diff --git a/docs/reference/ingest/processors/grok.asciidoc b/docs/reference/ingest/processors/grok.asciidoc index 315caff0dc6e4..073e8246d73c7 100644 --- a/docs/reference/ingest/processors/grok.asciidoc +++ b/docs/reference/ingest/processors/grok.asciidoc @@ -17,7 +17,7 @@ If you need help building patterns to match your logs, you will find the {kibana Grok sits on top of regular expressions, so any regular expressions are valid in grok as well. The regular expression library is Oniguruma, and you can see the full supported regexp syntax -https://github.com/kkos/oniguruma/blob/master/doc/RE[on the Onigiruma site]. +https://github.com/kkos/oniguruma/blob/master/doc/RE[on the Oniguruma site]. Grok works by leveraging this regular expression language to allow naming existing patterns and combining them into more complex patterns that match your fields. diff --git a/docs/reference/mapping/dynamic/templates.asciidoc b/docs/reference/mapping/dynamic/templates.asciidoc index 4fbed66449800..3ad7da6e17744 100644 --- a/docs/reference/mapping/dynamic/templates.asciidoc +++ b/docs/reference/mapping/dynamic/templates.asciidoc @@ -48,7 +48,7 @@ reordered or deleted after they were initially added. The `match_mapping_type` is the datatype detected by the json parser. Since JSON doesn't allow to distinguish a `long` from an `integer` or a `double` from -a `float`, it will always choose the wider datatype, ie. `long` for integers +a `float`, it will always choose the wider datatype, i.e. `long` for integers and `double` for floating-point numbers. 
The following datatypes may be automatically detected: diff --git a/docs/reference/mapping/params/similarity.asciidoc b/docs/reference/mapping/params/similarity.asciidoc index a0be0fb3ccbeb..07086f0947e0f 100644 --- a/docs/reference/mapping/params/similarity.asciidoc +++ b/docs/reference/mapping/params/similarity.asciidoc @@ -17,7 +17,7 @@ configuration are: `BM25`:: The Okapi BM25 algorithm. The algorithm used by default in Elasticsearch and Lucene. - See {defguide}/pluggable-similarites.html[Pluggable Similarity Algorithms] + See {defguide}/pluggable-similarities.html[Pluggable Similarity Algorithms] for more information. `classic`:: diff --git a/docs/reference/mapping/types/geo-shape.asciidoc b/docs/reference/mapping/types/geo-shape.asciidoc index 8efb184afa6ba..92ee2d1065100 100644 --- a/docs/reference/mapping/types/geo-shape.asciidoc +++ b/docs/reference/mapping/types/geo-shape.asciidoc @@ -121,7 +121,7 @@ near perfect spatial resolution (down to 1e-7 decimal degree precision) since al spatial relations are computed using an encoded vector representation of the original shape instead of a raster-grid representation as used by the <> indexing approach. Performance of the tessellator primarily -depends on the number of vertices that define the polygon/multi-polyogn. While +depends on the number of vertices that define the polygon/multi-polygon. While this is the default indexing technique prefix trees can still be used by setting the `tree` or `strategy` parameters according to the appropriate <>. Note that these parameters are now deprecated diff --git a/docs/reference/modules/transport.asciidoc b/docs/reference/modules/transport.asciidoc index a80f4a3b11b57..180a8190868f0 100644 --- a/docs/reference/modules/transport.asciidoc +++ b/docs/reference/modules/transport.asciidoc @@ -106,7 +106,7 @@ keepalives cannot be configured. 
==== Transport Compression [float] -===== Request Compresssion +===== Request Compression By default, the `transport.compress` setting is `false` and network-level request compression is disabled between nodes in the cluster. This default diff --git a/docs/reference/query-dsl/feature-query.asciidoc b/docs/reference/query-dsl/feature-query.asciidoc index 387278a432af6..5d1683854375b 100644 --- a/docs/reference/query-dsl/feature-query.asciidoc +++ b/docs/reference/query-dsl/feature-query.asciidoc @@ -213,7 +213,7 @@ exponent. Scores are computed as `S^exp^ / (S^exp^ + pivot^exp^)`. Like for the and scores are in +(0, 1)+. `exponent` must be positive, but is typically in +[0.5, 1]+. A good value should -be computed via traning. If you don't have the opportunity to do so, we recommend +be computed via training. If you don't have the opportunity to do so, we recommend that you stick to the `saturation` function instead. [source,js] diff --git a/docs/reference/release-notes/7.0.0-alpha2.asciidoc b/docs/reference/release-notes/7.0.0-alpha2.asciidoc index 7f66d21408224..b07088d6cfed6 100644 --- a/docs/reference/release-notes/7.0.0-alpha2.asciidoc +++ b/docs/reference/release-notes/7.0.0-alpha2.asciidoc @@ -18,7 +18,7 @@ Index APIs:: * Always enforce cluster-wide shard limit {pull}34892[#34892] (issues: {issue}20705[#20705], {issue}34021[#34021]) Ranking:: -* Forbid negative scores in functon_score query {pull}35709[#35709] (issue: {issue}33309[#33309]) +* Forbid negative scores in function_score query {pull}35709[#35709] (issue: {issue}33309[#33309]) Scripting:: * Delete deprecated getValues from ScriptDocValues {pull}36183[#36183] (issue: {issue}22919[#22919]) @@ -26,7 +26,7 @@ Scripting:: Search:: * Remove the deprecated _termvector endpoint. 
{pull}36131[#36131] (issues: {issue}36098[#36098], {issue}8484[#8484]) * Remove deprecated Graph endpoints {pull}35956[#35956] -* Validate metdata on `_msearch` {pull}35938[#35938] (issue: {issue}35869[#35869]) +* Validate metadata on `_msearch` {pull}35938[#35938] (issue: {issue}35869[#35869]) * Make hits.total an object in the search response {pull}35849[#35849] (issue: {issue}33028[#33028]) * Remove the distinction between query and filter context in QueryBuilders {pull}35354[#35354] (issue: {issue}35293[#35293]) * Throw a parsing exception when boost is set in span_or query (#28390) {pull}34112[#34112] (issue: {issue}28390[#28390]) @@ -544,7 +544,7 @@ Search:: * Add a More Like This query routing requirement check (#29678) {pull}33974[#33974] Security:: -* Remove license state listeners on closables {pull}36308[#36308] (issues: {issue}33328[#33328], {issue}35627[#35627], {issue}35628[#35628]) +* Remove license state listeners on closeables {pull}36308[#36308] (issues: {issue}33328[#33328], {issue}35627[#35627], {issue}35628[#35628]) Snapshot/Restore:: * Upgrade GCS Dependencies to 1.55.0 {pull}36634[#36634] (issues: {issue}35229[#35229], {issue}35459[#35459]) diff --git a/docs/reference/search/rank-eval.asciidoc b/docs/reference/search/rank-eval.asciidoc index 81c464b71d575..c549b5e7a689b 100644 --- a/docs/reference/search/rank-eval.asciidoc +++ b/docs/reference/search/rank-eval.asciidoc @@ -126,7 +126,7 @@ GET /my_index/_rank_eval <1> the template id <2> the template definition to use -<3> a reference to a previously defined temlate +<3> a reference to a previously defined template <4> the parameters to use to fill the template [float] diff --git a/docs/reference/setup/setup-xclient.asciidoc b/docs/reference/setup/setup-xclient.asciidoc index 819e3de98f4bd..9da482af8493a 100644 --- a/docs/reference/setup/setup-xclient.asciidoc +++ b/docs/reference/setup/setup-xclient.asciidoc @@ -11,7 +11,7 @@ cluster where {xpack} is installed, then you must download and 
configure the . Add the {xpack} transport JAR file to your *CLASSPATH*. You can download the {xpack} distribution and extract the JAR file manually or you can get it from the -https://artifacts.elastic.co/maven/org/elasticsearch/client/x-pack-transport/{version}/x-pack-transport-{version}.jar[Elasticsearc Maven repository]. +https://artifacts.elastic.co/maven/org/elasticsearch/client/x-pack-transport/{version}/x-pack-transport-{version}.jar[Elasticsearch Maven repository]. As with any dependency, you will also need its transitive dependencies. Refer to the https://artifacts.elastic.co/maven/org/elasticsearch/client/x-pack-transport/{version}/x-pack-transport-{version}.pom[X-Pack POM file for your version] when downloading for offline usage. diff --git a/docs/reference/sql/endpoints/client-apps/dbvis.asciidoc b/docs/reference/sql/endpoints/client-apps/dbvis.asciidoc index 3e8092babe8cd..779a27a06c752 100644 --- a/docs/reference/sql/endpoints/client-apps/dbvis.asciidoc +++ b/docs/reference/sql/endpoints/client-apps/dbvis.asciidoc @@ -21,7 +21,7 @@ Add the {es} JDBC driver to DbVisualizer through *Tools* > *Driver Manager*: image:images/sql/client-apps/dbvis-1-driver-manager.png[] Create a new driver entry through *Driver* > *Create Driver* entry and add the JDBC driver in the files panel -through the buttons on the right. Once specify, the driver class and its version should be automatically picked up - one can force the refresh through the *Find driver in liste locations* button, the second from the bottom on the right hand side: +through the buttons on the right. 
Once specified, the driver class and its version should be automatically picked up - one can force the refresh through the *Find driver in listed locations* button, the second from the bottom on the right hand side: image:images/sql/client-apps/dbvis-2-driver.png[] diff --git a/docs/reference/sql/endpoints/odbc/applications-microstrat.asciidoc b/docs/reference/sql/endpoints/odbc/applications-microstrat.asciidoc index 6e38322ef06e5..92357e97a34a8 100644 --- a/docs/reference/sql/endpoints/odbc/applications-microstrat.asciidoc +++ b/docs/reference/sql/endpoints/odbc/applications-microstrat.asciidoc @@ -80,7 +80,7 @@ image:images/sql/odbc/apps_microstrat_loadtable.png[] + . Data Access Mode + -Choose a table to load data from and press the _Finish_ button. When doing so, the application offers to choose a loading methdology. +Choose a table to load data from and press the _Finish_ button. When doing so, the application offers to choose a loading methodology. You can choose whichever, we'll exemplify the _Connect Live_ way: + [[apps_microstrat_live]] diff --git a/docs/reference/sql/endpoints/odbc/applications-powerbi.asciidoc b/docs/reference/sql/endpoints/odbc/applications-powerbi.asciidoc index 6a7ca8d62c6d0..0bdac88ab0463 100644 --- a/docs/reference/sql/endpoints/odbc/applications-powerbi.asciidoc +++ b/docs/reference/sql/endpoints/odbc/applications-powerbi.asciidoc @@ -45,7 +45,7 @@ tables will load a preview of the data within: .Pick table to load image:images/sql/odbc/apps_pbi_picktable.png[] -Now tick the chosen table and click on the _Load_ button. Power BI will now load and anlyze the data, populating a list with the available +Now tick the chosen table and click on the _Load_ button. Power BI will now load and analyze the data, populating a list with the available columns.
These can now be used to build the desired visualisation: [[apps_pbi_loaded]] diff --git a/docs/reference/sql/endpoints/odbc/configuration.asciidoc b/docs/reference/sql/endpoints/odbc/configuration.asciidoc index 679e528cc775f..dbc6f7b87c729 100644 --- a/docs/reference/sql/endpoints/odbc/configuration.asciidoc +++ b/docs/reference/sql/endpoints/odbc/configuration.asciidoc @@ -160,7 +160,7 @@ security option and is the recommended setting for production deployments. + * Certificate File + -In case the server uses a certificate that is not part of the PKI, for example usaing a self-signed certificate, you can configure the path to a X.509 certificate file that will be used by the driver to validate server's offered certificate. +In case the server uses a certificate that is not part of the PKI, for example using a self-signed certificate, you can configure the path to a X.509 certificate file that will be used by the driver to validate server's offered certificate. + The driver will only read the contents of the file just before a connection is attempted. See <> section further on how to check the validity of the provided parameters. + diff --git a/docs/reference/sql/endpoints/odbc/installation.asciidoc b/docs/reference/sql/endpoints/odbc/installation.asciidoc index 0aab1b68bda57..e112e24bba78f 100644 --- a/docs/reference/sql/endpoints/odbc/installation.asciidoc +++ b/docs/reference/sql/endpoints/odbc/installation.asciidoc @@ -76,7 +76,7 @@ If you encounter an error during installation we would encourage you to open an [[installation-cmd]] ==== Installation using the command line -NOTE: The examples given below apply to installation of the 64 bit MSI package. To acheive the same result with the 32 bit MSI package you would instead use the filename suffix `windows-x86.msi` +NOTE: The examples given below apply to installation of the 64 bit MSI package. 
To achieve the same result with the 32 bit MSI package you would instead use the filename suffix `windows-x86.msi` The `.msi` can also be installed via the command line. The simplest installation using the same defaults as the GUI is achieved by first navigating to the download directory, then running: diff --git a/docs/reference/sql/overview.asciidoc b/docs/reference/sql/overview.asciidoc index 814742b5f78c9..c5b3f0f5399e8 100644 --- a/docs/reference/sql/overview.asciidoc +++ b/docs/reference/sql/overview.asciidoc @@ -30,7 +30,7 @@ No need for additional hardware, processes, runtimes or libraries to query {es}; Lightweight and efficient:: -{es-sql} does not abstract {es} and its search capabilities - on the contrary, it embraces and exposes SQL to allow proper full-text search, in real-time, in the same declarative, succint fashion. +{es-sql} does not abstract {es} and its search capabilities - on the contrary, it embraces and exposes SQL to allow proper full-text search, in real-time, in the same declarative, succinct fashion. diff --git a/docs/reference/upgrade/reindex_upgrade.asciidoc b/docs/reference/upgrade/reindex_upgrade.asciidoc index c13343b64c38a..0f1eb155e6433 100644 --- a/docs/reference/upgrade/reindex_upgrade.asciidoc +++ b/docs/reference/upgrade/reindex_upgrade.asciidoc @@ -48,7 +48,7 @@ pre-5.x indices forward to 6.x. Data in time-based indices generally becomes less useful as time passes and are deleted as they age past your retention period. -Unless you have an unusally long retention period, you can just +Unless you have an unusually long retention period, you can just wait to upgrade to 6.x until all of your pre-5.x indices have been deleted. 
diff --git a/docs/reference/upgrade/set-paths-tip.asciidoc b/docs/reference/upgrade/set-paths-tip.asciidoc index 2dd120767c268..adfe3e29dac3a 100644 --- a/docs/reference/upgrade/set-paths-tip.asciidoc +++ b/docs/reference/upgrade/set-paths-tip.asciidoc @@ -2,7 +2,7 @@ ================================================ When you extract the zip or tarball packages, the `elasticsearch-n.n.n` -directory contains the Elasticsearh `config`, `data`, `logs` and +directory contains the Elasticsearch `config`, `data`, `logs` and `plugins` directories. We recommend moving these directories out of the Elasticsearch directory diff --git a/docs/resiliency/index.asciidoc b/docs/resiliency/index.asciidoc index 8c14aca953512..27aa620a34fd2 100644 --- a/docs/resiliency/index.asciidoc +++ b/docs/resiliency/index.asciidoc @@ -607,7 +607,7 @@ Currently, the circuit breaker protects against loading too much field data by e Elasticsearch has moved from an object-based cache to a page-based cache recycler as described in issue {GIT}4557[#4557]. This change makes garbage collection easier by limiting fragmentation, since all pages have the same size and are recycled. It also allows managing the size of the cache not based on the number of objects it contains, but on the memory that it uses. -These pages are used for two main purposes: implementing higher level data structures such as hash tables that are used internally by aggregations to eg. map terms to counts, as well as reusing memory in the translog/transport layer as detailed in issue {GIT}5691[#5691]. +These pages are used for two main purposes: implementing higher level data structures such as hash tables that are used internally by aggregations to e.g. map terms to counts, as well as reusing memory in the translog/transport layer as detailed in issue {GIT}5691[#5691]. 
[float] === Dedicated Master Nodes Resiliency (STATUS: DONE, v1.0.0) diff --git a/docs/ruby/client.asciidoc b/docs/ruby/client.asciidoc index 0301e47d8bcdf..2037ae1a0b280 100644 --- a/docs/ruby/client.asciidoc +++ b/docs/ruby/client.asciidoc @@ -101,7 +101,7 @@ persistent ("keep-alive") HTTP connections. === Extensions The https://github.com/elastic/elasticsearch-ruby/tree/master/elasticsearch-extensions[`elasticsearch-extensions`] -Rubygem provides a number of extensions to the core client, such as an API to programatically launch +Rubygem provides a number of extensions to the core client, such as an API to programmatically launch Elasticsearch clusters (eg. for testing purposes), and more. Please see its diff --git a/docs/src/test/java/org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java b/docs/src/test/java/org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java index dbda453e5f9fb..b4a6c49754869 100644 --- a/docs/src/test/java/org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java +++ b/docs/src/test/java/org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java @@ -66,8 +66,8 @@ public static Iterable parameters() throws Exception { entries.addAll(ExecutableSection.DEFAULT_EXECUTABLE_CONTEXTS); entries.add(new NamedXContentRegistry.Entry(ExecutableSection.class, new ParseField("compare_analyzers"), CompareAnalyzers::parse)); - NamedXContentRegistry executeableSectionRegistry = new NamedXContentRegistry(entries); - return ESClientYamlSuiteTestCase.createParameters(executeableSectionRegistry); + NamedXContentRegistry executableSectionRegistry = new NamedXContentRegistry(entries); + return ESClientYamlSuiteTestCase.createParameters(executableSectionRegistry); } @Override diff --git a/libs/core/src/main/java/org/elasticsearch/bootstrap/JarHell.java b/libs/core/src/main/java/org/elasticsearch/bootstrap/JarHell.java index 3de0ae5117e6a..9e14b3eb5cf40 100644 --- a/libs/core/src/main/java/org/elasticsearch/bootstrap/JarHell.java +++ 
b/libs/core/src/main/java/org/elasticsearch/bootstrap/JarHell.java @@ -63,7 +63,7 @@ public class JarHell { /** no instantiation */ private JarHell() {} - /** Simple driver class, can be used eg. from builds. Returns non-zero on jar-hell */ + /** Simple driver class, can be used e.g. from builds. Returns non-zero on jar-hell */ @SuppressForbidden(reason = "command line tool") public static void main(String args[]) throws Exception { System.out.println("checking for jar hell..."); diff --git a/libs/core/src/main/java/org/elasticsearch/common/util/concurrent/RefCounted.java b/libs/core/src/main/java/org/elasticsearch/common/util/concurrent/RefCounted.java index 1e7bdc0e78faa..ef3f75aba2a1d 100644 --- a/libs/core/src/main/java/org/elasticsearch/common/util/concurrent/RefCounted.java +++ b/libs/core/src/main/java/org/elasticsearch/common/util/concurrent/RefCounted.java @@ -24,9 +24,9 @@ * to itself are not in user anymore. This implements basic reference counting * for instance if async operations holding on to services that are close concurrently * but should be functional until all async operations have joined - * Classes implementing this interface should ref counted at any time ie. if an object is used it's reference count should + * Classes implementing this interface should be ref counted at any time i.e. if an object is used its reference count should * be increased before using it by calling #incRef and a corresponding #decRef must be called in a try/finally - * block to release the object again ie.: + * block to release the object again i.e.: *

  *      inst.incRef();
  *      try {
diff --git a/libs/dissect/src/test/java/org/elasticsearch/dissect/DissectParserTests.java b/libs/dissect/src/test/java/org/elasticsearch/dissect/DissectParserTests.java
index c22cec98eb79a..24cec85ca21b0 100644
--- a/libs/dissect/src/test/java/org/elasticsearch/dissect/DissectParserTests.java
+++ b/libs/dissect/src/test/java/org/elasticsearch/dissect/DissectParserTests.java
@@ -372,8 +372,8 @@ private void assertMatch(String pattern, String input, List expectedKeys
         assertMatch(pattern, input, expectedKeys, expectedValues, null);
     }
 
-    private void assertMatch(String pattern, String input, List expectedKeys, List expectedValues, String appendSeperator) {
-        Map results = new DissectParser(pattern, appendSeperator).parse(input);
+    private void assertMatch(String pattern, String input, List expectedKeys, List expectedValues, String appendSeparator) {
+        Map results = new DissectParser(pattern, appendSeparator).parse(input);
         List foundKeys = new ArrayList<>(results.keySet());
         List foundValues = new ArrayList<>(results.values());
         Collections.sort(foundKeys);
diff --git a/libs/grok/src/main/resources/patterns/java b/libs/grok/src/main/resources/patterns/java
index 01766b8ebd165..0a455251a8e5d 100644
--- a/libs/grok/src/main/resources/patterns/java
+++ b/libs/grok/src/main/resources/patterns/java
@@ -14,5 +14,5 @@ CATALINA_DATESTAMP %{MONTH} %{MONTHDAY}, 20%{YEAR} %{HOUR}:?%{MINUTE}(?::?%{SECO
 # yyyy-MM-dd HH:mm:ss,SSS ZZZ eg: 2014-01-09 17:32:25,527 -0800
 TOMCAT_DATESTAMP 20%{YEAR}-%{MONTHNUM}-%{MONTHDAY} %{HOUR}:?%{MINUTE}(?::?%{SECOND}) %{ISO8601_TIMEZONE}
 CATALINALOG %{CATALINA_DATESTAMP:timestamp} %{JAVACLASS:class} %{JAVALOGMESSAGE:logmessage}
-# 2014-01-09 20:03:28,269 -0800 | ERROR | com.example.service.ExampleService - something compeletely unexpected happened...
+# 2014-01-09 20:03:28,269 -0800 | ERROR | com.example.service.ExampleService - something completely unexpected happened...
 TOMCATLOG %{TOMCAT_DATESTAMP:timestamp} \| %{LOGLEVEL:level} \| %{JAVACLASS:class} - %{JAVALOGMESSAGE:logmessage}
diff --git a/libs/grok/src/test/java/org/elasticsearch/grok/GrokTests.java b/libs/grok/src/test/java/org/elasticsearch/grok/GrokTests.java
index 0193811a20d0a..df6e38056036e 100644
--- a/libs/grok/src/test/java/org/elasticsearch/grok/GrokTests.java
+++ b/libs/grok/src/test/java/org/elasticsearch/grok/GrokTests.java
@@ -119,7 +119,7 @@ public void testNamedFieldsWithWholeTextMatch() {
         assertThat(matches.get("stimestamp"), equalTo("11/01/01"));
     }
 
-    public void testWithOniguramaNamedCaptures() {
+    public void testWithOnigurumaNamedCaptures() {
         Grok grok = new Grok(basePatterns, "(?\\w+)");
         Map matches = grok.captures("hello world");
         assertThat(matches.get("foo"), equalTo("hello"));
diff --git a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsResults.java b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsResults.java
index d5a91dd0e19c7..7fdd30aeb78c5 100644
--- a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsResults.java
+++ b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsResults.java
@@ -82,7 +82,7 @@ protected Map getFieldCounts() {
         return Collections.unmodifiableMap(results.counts);
     }
 
-    /** return the fied count for the requested field */
+    /** return the field count for the requested field */
     public long getFieldCount(String field) {
         if (results.counts.containsKey(field) == false) {
             return 0;
diff --git a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/RunningStats.java b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/RunningStats.java
index 1be3279e8eaf1..f1ad628bd837f 100644
--- a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/RunningStats.java
+++ b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/RunningStats.java
@@ -265,7 +265,7 @@ public void merge(final RunningStats other) {
             nB2 = nB * nB;              // doc B num samples squared
             // variance
             variances.put(fieldName, varA + varB + d2 * nA * other.docCount / docCount);
-            // skeewness
+            // skewness
             newSkew = skewA + skewB + d3 * nA * nB * (nA - nB) / n2;
             skewness.put(fieldName, newSkew + 3D * d * (nA * varB - nB * varA) / docCount);
             // kurtosis
diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/SynonymsAnalysisTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/SynonymsAnalysisTests.java
index a63dd97568807..5b1055311f901 100644
--- a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/SynonymsAnalysisTests.java
+++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/SynonymsAnalysisTests.java
@@ -142,7 +142,7 @@ public void testSynonymsWrappedByMultiplexer() throws IOException {
         indexAnalyzers = createTestAnalysis(idxSettings, settings, new CommonAnalysisPlugin()).indexAnalyzers;
 
         BaseTokenStreamTestCase.assertAnalyzesTo(indexAnalyzers.get("synonymAnalyzer"), "Some developers are odd",
-            new String[]{ "some", "developers", "develop", "programm", "are", "odd" },
+            new String[]{ "some", "developers", "develop", "programm", "are", "odd" },
             new int[]{ 1, 1, 0, 0, 1, 1 });
     }
 
@@ -178,7 +178,7 @@ public void testKeywordRepeatAndSynonyms() throws IOException {
         indexAnalyzers = createTestAnalysis(idxSettings, settings, new CommonAnalysisPlugin()).indexAnalyzers;
 
         BaseTokenStreamTestCase.assertAnalyzesTo(indexAnalyzers.get("synonymAnalyzer"), "programmers",
-            new String[]{ "programmers", "programm", "develop" },
+            new String[]{ "programmers", "programm", "develop" },
             new int[]{ 1, 0, 0 });
     }
 
diff --git a/modules/ingest-geoip/build.gradle b/modules/ingest-geoip/build.gradle
index 02f8e465c304f..887bbb668b240 100644
--- a/modules/ingest-geoip/build.gradle
+++ b/modules/ingest-geoip/build.gradle
@@ -18,7 +18,7 @@
  */
 
 esplugin {
-  description 'Ingest processor that uses looksup geo data based on ip adresses using the Maxmind geo database'
+  description 'Ingest processor that looks up geo data based on ip addresses using the Maxmind geo database'
   classname 'org.elasticsearch.ingest.geoip.IngestGeoIpPlugin'
 }
 
diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Def.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Def.java
index c672956cb0719..efc317486975f 100644
--- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Def.java
+++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Def.java
@@ -174,7 +174,7 @@ static MethodHandle arrayLengthGetter(Class arrayType) {
      * A dynamic method call for variable {@code x} of type {@code def} looks like:
      * {@code x.method(args...)}
      * 

- * This method traverses {@code recieverClass}'s class hierarchy (including interfaces) + * This method traverses {@code receiverClass}'s class hierarchy (including interfaces) * until it finds a matching whitelisted method. If one is not found, it throws an exception. * Otherwise it returns a handle to the matching method. *

@@ -353,7 +353,7 @@ private static MethodHandle lookupReferenceInternal(PainlessLookup painlessLooku *

  • The value in a list at element {@code field} (integer) when the receiver is a List. * *

    - * This method traverses {@code recieverClass}'s class hierarchy (including interfaces) + * This method traverses {@code receiverClass}'s class hierarchy (including interfaces) * until it finds a matching whitelisted getter. If one is not found, it throws an exception. * Otherwise it returns a handle to the matching getter. *

    @@ -409,7 +409,7 @@ static MethodHandle lookupGetter(PainlessLookup painlessLookup, Class receive *

  • The value in a list at element {@code field} (integer) when the receiver is a List. * *

    - * This method traverses {@code recieverClass}'s class hierarchy (including interfaces) + * This method traverses {@code receiverClass}'s class hierarchy (including interfaces) * until it finds a matching whitelisted setter. If one is not found, it throws an exception. * Otherwise it returns a handle to the matching setter. *

    diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngine.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngine.java index cd50de3025ae8..d004fbe3b5904 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngine.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngine.java @@ -242,7 +242,7 @@ private Type generateStatefulFactory(Loader loader, ScriptContext context * to create new factories of the {@link ScriptContext#statefulFactoryClazz}. * @param loader The {@link ClassLoader} that is used to define the factory class and script class. * @param context The {@link ScriptContext}'s semantics are used to define the factory class. - * @param classType The type to be instaniated in the newFactory or newInstance method. Depends + * @param classType The type to be instantiated in the newFactory or newInstance method. Depends * on whether a {@link ScriptContext#statefulFactoryClazz} is specified. * @param The factory class. * @return A factory class that will return script instances. 
diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/api/Augmentation.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/api/Augmentation.java index 0b751b7d2f78f..2ce528fb7b920 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/api/Augmentation.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/api/Augmentation.java @@ -453,7 +453,7 @@ public static Map> groupBy(Map receiver, BiFunction replacementBuilder) { Matcher m = pattern.matcher(receiver); if (false == m.find()) { - // CharSequqence's toString is *supposed* to always return the characters in the sequence as a String + // CharSequence's toString is *supposed* to always return the characters in the sequence as a String return receiver.toString(); } StringBuffer result = new StringBuffer(initialBufferForReplaceWith(receiver)); @@ -471,7 +471,7 @@ public static String replaceAll(CharSequence receiver, Pattern pattern, Function public static String replaceFirst(CharSequence receiver, Pattern pattern, Function replacementBuilder) { Matcher m = pattern.matcher(receiver); if (false == m.find()) { - // CharSequqence's toString is *supposed* to always return the characters in the sequence as a String + // CharSequence's toString is *supposed* to always return the characters in the sequence as a String return receiver.toString(); } StringBuffer result = new StringBuffer(initialBufferForReplaceWith(receiver)); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/AStoreable.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/AStoreable.java index 824582f5ad30d..c4d755c59bca2 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/AStoreable.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/AStoreable.java @@ -99,7 +99,7 @@ abstract class AStoreable extends AExpression { abstract void load(MethodWriter writer, Globals globals); 
/** - * Called to store a storabable to local memory. + * Called to store a storable to local memory. */ abstract void store(MethodWriter writer, Globals globals); @@ -116,7 +116,7 @@ static void writeIndexFlip(MethodWriter writer, Consumer writeGetL writer.swap(); // negative_index, array writer.dupX1(); // array, negative_index, array writeGetLength.accept(writer); // array, negative_index, length - writer.visitInsn(Opcodes.IADD); // array, noralized_index - writer.mark(noFlip); // array, noralized_index + writer.visitInsn(Opcodes.IADD); // array, normalized_index + writer.mark(noFlip); // array, normalized_index } } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PSubNullSafeCallInvoke.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PSubNullSafeCallInvoke.java index 43b0feb0009e7..b3ea74b1761d2 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PSubNullSafeCallInvoke.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PSubNullSafeCallInvoke.java @@ -34,7 +34,7 @@ */ public class PSubNullSafeCallInvoke extends AExpression { /** - * The expression gaurded by the null check. Required at construction time and replaced at analysis time. + * The expression guarded by the null check. Required at construction time and replaced at analysis time. */ private AExpression guarded; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/package-info.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/package-info.java index 8ba8b79b74a92..15c097c9613e5 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/package-info.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/package-info.java @@ -142,7 +142,7 @@ * loads from a postfix node works in the same fashion. Stores work somewhat differently as * described by later documentation. *

    - * Storebable nodes have three methods for writing -- setup, load, and store. These methods + * Storeable nodes have three methods for writing -- setup, load, and store. These methods * are used in conjunction with a parent node aware of the storeable node (lhs) that has a node * representing a value to store (rhs). The setup method is always once called before a store * to give storeable nodes a chance to write any prefixes they may have and any values such as diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ElvisTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ElvisTests.java index da0822c8f7555..0f61ca88356f1 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ElvisTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ElvisTests.java @@ -108,15 +108,15 @@ private void checkOneBranch(String code, boolean expectOneBranch) { assertThat(disassembled, firstElvisDestinationLabelIndex, greaterThan(-1)); String firstElvisDestinationLabel = disassembled.substring(firstElvisDestinationLabelIndex + "IFNONNULL ".length(), disassembled.indexOf('\n', firstElvisDestinationLabelIndex)); - int firstElvisDestionation = disassembled.indexOf(" " + firstElvisDestinationLabel); - assertThat(disassembled, firstElvisDestionation, greaterThan(-1)); - int ifAfterFirstElvisDestination = disassembled.indexOf("IF", firstElvisDestionation); + int firstElvisDestination = disassembled.indexOf(" " + firstElvisDestinationLabel); + assertThat(disassembled, firstElvisDestination, greaterThan(-1)); + int ifAfterFirstElvisDestination = disassembled.indexOf("IF", firstElvisDestination); if (expectOneBranch) { assertThat(disassembled, ifAfterFirstElvisDestination, lessThan(0)); } else { assertThat(disassembled, ifAfterFirstElvisDestination, greaterThan(-1)); } - int returnAfterFirstElvisDestination = disassembled.indexOf("RETURN", firstElvisDestionation); + int returnAfterFirstElvisDestination 
= disassembled.indexOf("RETURN", firstElvisDestination); assertThat(disassembled, returnAfterFirstElvisDestination, greaterThan(-1)); } diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/GeneralCastTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/GeneralCastTests.java index 353146211f384..84073364845c1 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/GeneralCastTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/GeneralCastTests.java @@ -75,7 +75,7 @@ public void testBinaryPrefix() { } /** - * Binary compound postifx with explicit cast + * Binary compound postfix with explicit cast */ public void testBinaryPostfix() { assertEquals((byte)5, exec("long x = 5L; return (byte) (x++);")); diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/PainlessExecuteRequestTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/PainlessExecuteRequestTests.java index 4a7a5c77e1cfb..af501cb8bd132 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/PainlessExecuteRequestTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/PainlessExecuteRequestTests.java @@ -47,7 +47,7 @@ public class PainlessExecuteRequestTests extends AbstractStreamableTestCase docMapper.parse(new SourceToParse("test", "type", "1", BytesReference.bytes(XContentFactory.jsonBuilder().startObject() @@ -202,7 +202,7 @@ public void testMultipleLevels() throws Exception { assertEquals("2", doc.rootDoc().getBinaryValue("join_field#child").utf8ToString()); assertEquals("grand_child", doc.rootDoc().getBinaryValue("join_field").utf8ToString()); - // Unkwnown join name + // Unknown join name exc = expectThrows(MapperParsingException.class, () -> docMapper.parse(new SourceToParse("test", "type", "1", BytesReference.bytes(XContentFactory.jsonBuilder().startObject() diff --git 
a/modules/percolator/src/main/java/org/elasticsearch/percolator/QueryAnalyzer.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/QueryAnalyzer.java index ebebfa01b67bf..2d2e4fa83d790 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/QueryAnalyzer.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/QueryAnalyzer.java @@ -283,7 +283,7 @@ private static BiFunction spanNearQuery() { private static BiFunction spanOrQuery() { return (query, version) -> { SpanOrQuery spanOrQuery = (SpanOrQuery) query; - // handle it like a boolean query to not dulplicate eg. logic + // handle it like a boolean query to not duplicate e.g. logic // about duplicated terms BooleanQuery.Builder builder = new BooleanQuery.Builder(); for (SpanQuery clause : spanOrQuery.getClauses()) { diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java index d4fe0fe1dddd4..e4731919fa7d0 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java @@ -269,7 +269,7 @@ private static BytesReference randomSource(Set usedFields) { } @Override - protected boolean isCachable(PercolateQueryBuilder queryBuilder) { + protected boolean isCacheable(PercolateQueryBuilder queryBuilder) { return false; } diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/MeanReciprocalRank.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/MeanReciprocalRank.java index 8b0ed42acb53e..b58373cc0c648 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/MeanReciprocalRank.java +++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/MeanReciprocalRank.java @@ -52,20 +52,20 @@ public class MeanReciprocalRank 
implements EvaluationMetric { private final int k; /** ratings equal or above this value will be considered relevant */ - private final int relevantRatingThreshhold; + private final int relevantRatingThreshold; public MeanReciprocalRank() { this(DEFAULT_RATING_THRESHOLD, DEFAULT_K); } MeanReciprocalRank(StreamInput in) throws IOException { - this.relevantRatingThreshhold = in.readVInt(); + this.relevantRatingThreshold = in.readVInt(); this.k = in.readVInt(); } @Override public void writeTo(StreamOutput out) throws IOException { - out.writeVInt(this.relevantRatingThreshhold); + out.writeVInt(this.relevantRatingThreshold); out.writeVInt(this.k); } @@ -82,7 +82,7 @@ public MeanReciprocalRank(int relevantRatingThreshold, int k) { throw new IllegalArgumentException("Window size k must be positive."); } this.k = k; - this.relevantRatingThreshhold = relevantRatingThreshold; + this.relevantRatingThreshold = relevantRatingThreshold; } int getK() { @@ -103,7 +103,7 @@ public String getWriteableName() { * Return the rating threshold above which ratings are considered to be "relevant". 
*/ public int getRelevantRatingThreshold() { - return relevantRatingThreshhold; + return relevantRatingThreshold; } /** @@ -117,7 +117,7 @@ public EvalQueryQuality evaluate(String taskId, SearchHit[] hits, List= this.relevantRatingThreshhold) { + if (rating.getAsInt() >= this.relevantRatingThreshold) { firstRelevant = rank; break; } @@ -155,7 +155,7 @@ public static MeanReciprocalRank fromXContent(XContentParser parser) { public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); builder.startObject(NAME); - builder.field(RELEVANT_RATING_FIELD.getPreferredName(), this.relevantRatingThreshhold); + builder.field(RELEVANT_RATING_FIELD.getPreferredName(), this.relevantRatingThreshold); builder.field(K_FIELD.getPreferredName(), this.k); builder.endObject(); builder.endObject(); @@ -171,13 +171,13 @@ public final boolean equals(Object obj) { return false; } MeanReciprocalRank other = (MeanReciprocalRank) obj; - return Objects.equals(relevantRatingThreshhold, other.relevantRatingThreshhold) + return Objects.equals(relevantRatingThreshold, other.relevantRatingThreshold) && Objects.equals(k, other.k); } @Override public final int hashCode() { - return Objects.hash(relevantRatingThreshhold, k); + return Objects.hash(relevantRatingThreshold, k); } public static final class Detail implements MetricDetail { diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/MetricDetail.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/MetricDetail.java index bc95b03c8bd13..ead2e3b46b174 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/MetricDetail.java +++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/MetricDetail.java @@ -26,7 +26,7 @@ import java.io.IOException; /** - * Details about a specific {@link EvaluationMetric} that should be included in the resonse. 
+ * Details about a specific {@link EvaluationMetric} that should be included in the response. */ public interface MetricDetail extends ToXContentObject, NamedWriteable { diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/PrecisionAtK.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/PrecisionAtK.java index bb5a579ead6ee..65ea55c75bacc 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/PrecisionAtK.java +++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/PrecisionAtK.java @@ -59,7 +59,7 @@ public class PrecisionAtK implements EvaluationMetric { private static final int DEFAULT_K = 10; private final boolean ignoreUnlabeled; - private final int relevantRatingThreshhold; + private final int relevantRatingThreshold; private final int k; /** @@ -81,7 +81,7 @@ public PrecisionAtK(int threshold, boolean ignoreUnlabeled, int k) { if (k <= 0) { throw new IllegalArgumentException("Window size k must be positive."); } - this.relevantRatingThreshhold = threshold; + this.relevantRatingThreshold = threshold; this.ignoreUnlabeled = ignoreUnlabeled; this.k = k; } @@ -107,7 +107,7 @@ public PrecisionAtK() { } PrecisionAtK(StreamInput in) throws IOException { - relevantRatingThreshhold = in.readVInt(); + relevantRatingThreshold = in.readVInt(); ignoreUnlabeled = in.readBoolean(); k = in.readVInt(); } @@ -118,7 +118,7 @@ int getK() { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeVInt(relevantRatingThreshhold); + out.writeVInt(relevantRatingThreshold); out.writeBoolean(ignoreUnlabeled); out.writeVInt(k); } @@ -133,7 +133,7 @@ public String getWriteableName() { * "relevant" for this metric. Defaults to 1. 
*/ public int getRelevantRatingThreshold() { - return relevantRatingThreshhold; + return relevantRatingThreshold; } /** @@ -166,7 +166,7 @@ public EvalQueryQuality evaluate(String taskId, SearchHit[] hits, for (RatedSearchHit hit : ratedSearchHits) { OptionalInt rating = hit.getRating(); if (rating.isPresent()) { - if (rating.getAsInt() >= this.relevantRatingThreshhold) { + if (rating.getAsInt() >= this.relevantRatingThreshold) { truePositives++; } else { falsePositives++; @@ -190,7 +190,7 @@ public EvalQueryQuality evaluate(String taskId, SearchHit[] hits, public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); builder.startObject(NAME); - builder.field(RELEVANT_RATING_FIELD.getPreferredName(), this.relevantRatingThreshhold); + builder.field(RELEVANT_RATING_FIELD.getPreferredName(), this.relevantRatingThreshold); builder.field(IGNORE_UNLABELED_FIELD.getPreferredName(), this.ignoreUnlabeled); builder.field(K_FIELD.getPreferredName(), this.k); builder.endObject(); @@ -207,14 +207,14 @@ public final boolean equals(Object obj) { return false; } PrecisionAtK other = (PrecisionAtK) obj; - return Objects.equals(relevantRatingThreshhold, other.relevantRatingThreshhold) + return Objects.equals(relevantRatingThreshold, other.relevantRatingThreshold) && Objects.equals(k, other.k) && Objects.equals(ignoreUnlabeled, other.ignoreUnlabeled); } @Override public final int hashCode() { - return Objects.hash(relevantRatingThreshhold, ignoreUnlabeled, k); + return Objects.hash(relevantRatingThreshold, ignoreUnlabeled, k); } public static final class Detail implements MetricDetail { diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RatedRequest.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RatedRequest.java index 2fe3f1922d509..15d28bc58ec91 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RatedRequest.java +++ 
b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RatedRequest.java @@ -94,7 +94,7 @@ public class RatedRequest implements Writeable, ToXContentObject { * @param id a unique name for this rated request * @param ratedDocs a list of document ratings * @param params template parameters - * @param templateId a templare id + * @param templateId a template id */ public RatedRequest(String id, List ratedDocs, Map params, String templateId) { diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/PrecisionAtKTests.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/PrecisionAtKTests.java index b9e7bf25aaf7a..ae20971ff99ec 100644 --- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/PrecisionAtKTests.java +++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/PrecisionAtKTests.java @@ -156,8 +156,8 @@ public void testNoResults() throws Exception { public void testParseFromXContent() throws IOException { String xContent = " {\n" + " \"relevant_rating_threshold\" : 2" + "}"; try (XContentParser parser = createParser(JsonXContent.jsonXContent, xContent)) { - PrecisionAtK precicionAt = PrecisionAtK.fromXContent(parser); - assertEquals(2, precicionAt.getRelevantRatingThreshold()); + PrecisionAtK precisionAt = PrecisionAtK.fromXContent(parser); + assertEquals(2, precisionAt.getRelevantRatingThreshold()); } } diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalRequestIT.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalRequestIT.java index cc5b554e39e93..fbb06b96de2b8 100644 --- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalRequestIT.java +++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalRequestIT.java @@ -154,7 +154,7 @@ public void testPrecisionAtRequest() { builder = new RankEvalRequestBuilder(client(), RankEvalAction.INSTANCE, new RankEvalRequest(task, new String[] 
{ TEST_INDEX })); response = client().execute(RankEvalAction.INSTANCE, builder.request()).actionGet(); - // if we look only at top 3 documente, the expected P@3 for the first query is + // if we look only at top 3 documents, the expected P@3 for the first query is // 2/3 and the expected Prec@ for the second is 1/3, divided by 2 to get the average expectedPrecision = (1.0 / 3.0 + 2.0 / 3.0) / 2.0; assertEquals(expectedPrecision, response.getMetricScore(), Double.MIN_VALUE); diff --git a/modules/reindex/build.gradle b/modules/reindex/build.gradle index 96b49e0a50c62..2d0bf55ee1c23 100644 --- a/modules/reindex/build.gradle +++ b/modules/reindex/build.gradle @@ -71,7 +71,7 @@ thirdPartyAudit.excludes = [ 'org.apache.log.Logger', ] -// Support for testing reindex-from-remote against old Elaticsearch versions +// Support for testing reindex-from-remote against old Elasticsearch versions configurations { oldesFixture es2 diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFailureTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFailureTests.java index c077c992beb60..0c84b55af0fa7 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFailureTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFailureTests.java @@ -110,7 +110,7 @@ public void testResponseOnSearchFailure() throws Exception { /* * In the past we've seen the delete of the source index * actually take effect *during* the `indexDocs` call in - * the next step. This breaks things pretty disasterously + * the next step. This breaks things pretty disastrously * so we *try* and wait for the delete to be fully * complete here. 
*/ diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/TransportRethrottleActionTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/TransportRethrottleActionTests.java index 3c2f5194fceda..84f9873be946b 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/TransportRethrottleActionTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/TransportRethrottleActionTests.java @@ -101,7 +101,7 @@ public void testRethrottleSuccessfulResponse() { List tasks = new ArrayList<>(); List sliceStatuses = new ArrayList<>(slices); for (int i = 0; i < slices; i++) { - BulkByScrollTask.Status status = believeableInProgressStatus(i); + BulkByScrollTask.Status status = believableInProgressStatus(i); tasks.add(new TaskInfo(new TaskId("test", 123), "test", "test", "test", status, 0, 0, true, new TaskId("test", task.getId()), Collections.emptyMap())); sliceStatuses.add(new BulkByScrollTask.StatusOrException(status)); @@ -115,14 +115,14 @@ public void testRethrottleWithSomeSucceeded() { int succeeded = between(1, slices - 1); List sliceStatuses = new ArrayList<>(slices); for (int i = 0; i < succeeded; i++) { - BulkByScrollTask.Status status = believeableCompletedStatus(i); + BulkByScrollTask.Status status = believableCompletedStatus(i); task.getLeaderState().onSliceResponse(neverCalled(), i, new BulkByScrollResponse(timeValueMillis(10), status, emptyList(), emptyList(), false)); sliceStatuses.add(new BulkByScrollTask.StatusOrException(status)); } List tasks = new ArrayList<>(); for (int i = succeeded; i < slices; i++) { - BulkByScrollTask.Status status = believeableInProgressStatus(i); + BulkByScrollTask.Status status = believableInProgressStatus(i); tasks.add(new TaskInfo(new TaskId("test", 123), "test", "test", "test", status, 0, 0, true, new TaskId("test", task.getId()), Collections.emptyMap())); sliceStatuses.add(new BulkByScrollTask.StatusOrException(status)); @@ -137,7 +137,7 @@ public void 
testRethrottleWithAllSucceeded() { for (int i = 0; i < slices; i++) { @SuppressWarnings("unchecked") ActionListener listener = i < slices - 1 ? neverCalled() : mock(ActionListener.class); - BulkByScrollTask.Status status = believeableCompletedStatus(i); + BulkByScrollTask.Status status = believableCompletedStatus(i); task.getLeaderState().onSliceResponse(listener, i, new BulkByScrollResponse(timeValueMillis(10), status, emptyList(), emptyList(), false)); if (i == slices - 1) { @@ -179,11 +179,11 @@ public void testRethrottleNodeFailure() { expectException(theInstance(e))); } - private BulkByScrollTask.Status believeableInProgressStatus(Integer sliceId) { + private BulkByScrollTask.Status believableInProgressStatus(Integer sliceId) { return new BulkByScrollTask.Status(sliceId, 10, 0, 0, 0, 0, 0, 0, 0, 0, timeValueMillis(0), 0, null, timeValueMillis(0)); } - private BulkByScrollTask.Status believeableCompletedStatus(Integer sliceId) { + private BulkByScrollTask.Status believableCompletedStatus(Integer sliceId) { return new BulkByScrollTask.Status(sliceId, 10, 10, 0, 0, 0, 0, 0, 0, 0, timeValueMillis(0), 0, null, timeValueMillis(0)); } @@ -196,7 +196,7 @@ public void onResponse(T response) { @Override public void onFailure(Exception e) { - throw new RuntimeException("Expected no interations but was received a failure", e); + throw new RuntimeException("Expected no interactions but was received a failure", e); } }; } diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuildersTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuildersTests.java index 2f801811327b8..476fc796ba36b 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuildersTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuildersTests.java @@ -56,7 +56,7 @@ * features from this file just because the version constants have been 
removed. */ public class RemoteRequestBuildersTests extends ESTestCase { - public void testIntialSearchPath() { + public void testInitialSearchPath() { Version remoteVersion = Version.fromId(between(0, Version.CURRENT.id)); BytesReference query = new BytesArray("{}"); diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/Netty4Plugin.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/Netty4Plugin.java index 609186fc3c30e..74a3a2c6c70a8 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/Netty4Plugin.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/Netty4Plugin.java @@ -70,7 +70,7 @@ public List> getSettings() { public Settings additionalSettings() { return Settings.builder() // here we set the netty4 transport and http transport as the default. This is a set once setting - // ie. if another plugin does that as well the server will fail - only one default network can exist! + // i.e. if another plugin does that as well the server will fail - only one default network can exist! 
.put(NetworkModule.HTTP_DEFAULT_TYPE_SETTING.getKey(), NETTY_HTTP_TRANSPORT_NAME) .put(NetworkModule.TRANSPORT_DEFAULT_TYPE_SETTING.getKey(), NETTY_TRANSPORT_NAME) .build(); diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/rest/Netty4BadRequestIT.java b/modules/transport-netty4/src/test/java/org/elasticsearch/rest/Netty4BadRequestIT.java index cfda71f10096e..2c64cb0508047 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/rest/Netty4BadRequestIT.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/rest/Netty4BadRequestIT.java @@ -52,18 +52,18 @@ public void testBadRequest() throws IOException { for (Map.Entry entry : map.entrySet()) { @SuppressWarnings("unchecked") final Map settings = (Map)((Map)entry.getValue()).get("settings"); - final int maxIntialLineLength; + final int maxInitialLineLength; if (settings.containsKey("http")) { @SuppressWarnings("unchecked") final Map httpSettings = (Map)settings.get("http"); if (httpSettings.containsKey(key)) { - maxIntialLineLength = ByteSizeValue.parseBytesSizeValue((String)httpSettings.get(key), key).bytesAsInt(); + maxInitialLineLength = ByteSizeValue.parseBytesSizeValue((String)httpSettings.get(key), key).bytesAsInt(); } else { - maxIntialLineLength = httpMaxInitialLineLength.getDefault(Settings.EMPTY).bytesAsInt(); + maxInitialLineLength = httpMaxInitialLineLength.getDefault(Settings.EMPTY).bytesAsInt(); } } else { - maxIntialLineLength = httpMaxInitialLineLength.getDefault(Settings.EMPTY).bytesAsInt(); + maxInitialLineLength = httpMaxInitialLineLength.getDefault(Settings.EMPTY).bytesAsInt(); } - maxMaxInitialLineLength = Math.max(maxMaxInitialLineLength, maxIntialLineLength); + maxMaxInitialLineLength = Math.max(maxMaxInitialLineLength, maxInitialLineLength); } final String path = "/" + new String(new byte[maxMaxInitialLineLength], Charset.forName("UTF-8")).replace('\0', 'a'); diff --git 
a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/ESLoggingHandlerIT.java b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/ESLoggingHandlerIT.java index 200c9aa4bbeb4..71585ea7a4e8e 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/ESLoggingHandlerIT.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/ESLoggingHandlerIT.java @@ -59,7 +59,7 @@ public void testLoggingHandler() throws IllegalAccessException { ", action: cluster:monitor/nodes/hot_threads\\[n\\]\\]" + " WRITE: \\d+B"; final MockLogAppender.LoggingExpectation writeExpectation = - new MockLogAppender.PatternSeenEventExcpectation( + new MockLogAppender.PatternSeenEventExpectation( "hot threads request", TransportLogger.class.getCanonicalName(), Level.TRACE, writePattern); final MockLogAppender.LoggingExpectation flushExpectation = @@ -74,7 +74,7 @@ public void testLoggingHandler() throws IllegalAccessException { " READ: \\d+B"; final MockLogAppender.LoggingExpectation readExpectation = - new MockLogAppender.PatternSeenEventExcpectation( + new MockLogAppender.PatternSeenEventExpectation( "hot threads request", TransportLogger.class.getCanonicalName(), Level.TRACE, readPattern); appender.addExpectation(writeExpectation); diff --git a/plugins/analysis-icu/src/test/java/org/elasticsearch/index/mapper/CollationFieldTypeTests.java b/plugins/analysis-icu/src/test/java/org/elasticsearch/index/mapper/CollationFieldTypeTests.java index a261e8b3b7e9a..7275df1bf68d5 100644 --- a/plugins/analysis-icu/src/test/java/org/elasticsearch/index/mapper/CollationFieldTypeTests.java +++ b/plugins/analysis-icu/src/test/java/org/elasticsearch/index/mapper/CollationFieldTypeTests.java @@ -45,7 +45,7 @@ protected MappedFieldType createDefaultFieldType() { public void testIsFieldWithinQuery() throws IOException { CollationFieldType ft = new CollationFieldType(); - // current impl ignores args and shourd always 
return INTERSECTS + // current impl ignores args and should always return INTERSECTS assertEquals(Relation.INTERSECTS, ft.isFieldWithinQuery(null, RandomStrings.randomAsciiOfLengthBetween(random(), 0, 5), RandomStrings.randomAsciiOfLengthBetween(random(), 0, 5), diff --git a/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiTokenizerFactory.java b/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiTokenizerFactory.java index e9268f7306512..f5988b4a9d469 100644 --- a/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiTokenizerFactory.java +++ b/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiTokenizerFactory.java @@ -42,13 +42,13 @@ public class KuromojiTokenizerFactory extends AbstractTokenizerFactory { private final String nBestExamples; private final int nBestCost; - private boolean discartPunctuation; + private boolean discardPunctuation; public KuromojiTokenizerFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) { super(indexSettings, settings); mode = getMode(settings); userDictionary = getUserDictionary(env, settings); - discartPunctuation = settings.getAsBoolean("discard_punctuation", true); + discardPunctuation = settings.getAsBoolean("discard_punctuation", true); nBestCost = settings.getAsInt(NBEST_COST, -1); nBestExamples = settings.get(NBEST_EXAMPLES); } @@ -87,7 +87,7 @@ public static JapaneseTokenizer.Mode getMode(Settings settings) { @Override public Tokenizer create() { - JapaneseTokenizer t = new JapaneseTokenizer(userDictionary, discartPunctuation, mode); + JapaneseTokenizer t = new JapaneseTokenizer(userDictionary, discardPunctuation, mode); int nBestCost = this.nBestCost; if (nBestExamples != null) { nBestCost = Math.max(nBestCost, t.calcNBestCost(nBestExamples)); diff --git a/plugins/analysis-ukrainian/src/test/java/org/elasticsearch/index/analysis/UkrainianAnalysisTests.java 
b/plugins/analysis-ukrainian/src/test/java/org/elasticsearch/index/analysis/UkrainianAnalysisTests.java index a45549c22bd97..b287a1ad25499 100644 --- a/plugins/analysis-ukrainian/src/test/java/org/elasticsearch/index/analysis/UkrainianAnalysisTests.java +++ b/plugins/analysis-ukrainian/src/test/java/org/elasticsearch/index/analysis/UkrainianAnalysisTests.java @@ -33,7 +33,7 @@ public class UkrainianAnalysisTests extends ESTestCase { - public void testDefaultsUkranianAnalysis() throws IOException { + public void testDefaultsUkrainianAnalysis() throws IOException { final TestAnalysis analysis = createTestAnalysis(new Index("test", "_na_"), Settings.EMPTY, new AnalysisUkrainianPlugin()); diff --git a/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/plugin/discovery/azure/classic/AzureDiscoveryPlugin.java b/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/plugin/discovery/azure/classic/AzureDiscoveryPlugin.java index 795db2846cef3..03bbcf91fe10e 100644 --- a/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/plugin/discovery/azure/classic/AzureDiscoveryPlugin.java +++ b/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/plugin/discovery/azure/classic/AzureDiscoveryPlugin.java @@ -52,7 +52,7 @@ public AzureDiscoveryPlugin(Settings settings) { logger.trace("starting azure classic discovery plugin..."); } - // overrideable for tests + // overridable for tests protected AzureComputeService createComputeService() { return new AzureComputeServiceImpl(settings); } diff --git a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryUpdateSettingsTests.java b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryUpdateSettingsTests.java index e91dff713b4f0..179fcff7ac16d 100644 --- a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryUpdateSettingsTests.java +++ 
b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryUpdateSettingsTests.java @@ -29,7 +29,7 @@ import static org.hamcrest.CoreMatchers.is; /** - * Just an empty Node Start test to check eveything if fine when + * Just an empty Node Start test to check everything if fine when * starting. * This test requires AWS to run. */ diff --git a/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceInstancesServiceImpl.java b/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceInstancesServiceImpl.java index e765a66486b03..5d61ef1ecffb1 100644 --- a/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceInstancesServiceImpl.java +++ b/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceInstancesServiceImpl.java @@ -172,7 +172,7 @@ protected synchronized HttpTransport getGceHttpTransport() throws GeneralSecurit if (validateCerts) { gceHttpTransport = GoogleNetHttpTransport.newTrustedTransport(); } else { - // this is only used for testing - alternative we could use the defaul keystore but this requires special configs too.. + // this is only used for testing - alternative we could use the default keystore but this requires special configs too.. 
gceHttpTransport = new NetHttpTransport.Builder().doNotValidateCertificate().build(); } } diff --git a/plugins/discovery-gce/src/main/java/org/elasticsearch/plugin/discovery/gce/GceDiscoveryPlugin.java b/plugins/discovery-gce/src/main/java/org/elasticsearch/plugin/discovery/gce/GceDiscoveryPlugin.java index 9aef304e08fcc..806a60ea330da 100644 --- a/plugins/discovery-gce/src/main/java/org/elasticsearch/plugin/discovery/gce/GceDiscoveryPlugin.java +++ b/plugins/discovery-gce/src/main/java/org/elasticsearch/plugin/discovery/gce/GceDiscoveryPlugin.java @@ -78,7 +78,7 @@ public GceDiscoveryPlugin(Settings settings) { logger.trace("starting gce discovery plugin..."); } - // overrideable for tests + // overridable for tests protected GceInstancesService createGceInstancesService() { return new GceInstancesServiceImpl(settings); } diff --git a/plugins/discovery-gce/src/test/java/org/elasticsearch/cloud/gce/GceInstancesServiceImplTests.java b/plugins/discovery-gce/src/test/java/org/elasticsearch/cloud/gce/GceInstancesServiceImplTests.java index efb9b6c03d875..d97809e105b84 100644 --- a/plugins/discovery-gce/src/test/java/org/elasticsearch/cloud/gce/GceInstancesServiceImplTests.java +++ b/plugins/discovery-gce/src/test/java/org/elasticsearch/cloud/gce/GceInstancesServiceImplTests.java @@ -36,7 +36,7 @@ public class GceInstancesServiceImplTests extends ESTestCase { public void testHeaderContainsMetadataFlavor() throws Exception { - final AtomicBoolean addMetdataFlavor = new AtomicBoolean(); + final AtomicBoolean addMetadataFlavor = new AtomicBoolean(); final MockHttpTransport transport = new MockHttpTransport() { @Override public LowLevelHttpRequest buildRequest(String method, final String url) { @@ -47,7 +47,7 @@ public LowLevelHttpResponse execute() { response.setStatusCode(200); response.setContentType(Json.MEDIA_TYPE); response.setContent("value"); - if (addMetdataFlavor.get()) { + if (addMetadataFlavor.get()) { response.addHeader("Metadata-Flavor", "Google"); } return 
response; @@ -66,7 +66,7 @@ protected synchronized HttpTransport getGceHttpTransport() { final String serviceURL = "/computeMetadata/v1/project/project-id"; assertThat(service.getAppEngineValueFromMetadataServer(serviceURL), is(nullValue())); - addMetdataFlavor.set(true); + addMetadataFlavor.set(true); assertThat(service.getAppEngineValueFromMetadataServer(serviceURL), is("value")); } } diff --git a/plugins/ingest-attachment/src/test/java/org/elasticsearch/ingest/attachment/TikaDocTests.java b/plugins/ingest-attachment/src/test/java/org/elasticsearch/ingest/attachment/TikaDocTests.java index b4445d16b7769..5c9472b2a5c5d 100644 --- a/plugins/ingest-attachment/src/test/java/org/elasticsearch/ingest/attachment/TikaDocTests.java +++ b/plugins/ingest-attachment/src/test/java/org/elasticsearch/ingest/attachment/TikaDocTests.java @@ -53,12 +53,12 @@ public void testFiles() throws Exception { try (DirectoryStream stream = Files.newDirectoryStream(tmp)) { for (Path doc : stream) { logger.debug("parsing: {}", doc); - assertParseable(doc); + assertParsable(doc); } } } - void assertParseable(Path fileName) throws Exception { + void assertParsable(Path fileName) throws Exception { try { byte bytes[] = Files.readAllBytes(fileName); String parsedContent = TikaImpl.parse(bytes, new Metadata(), -1); diff --git a/plugins/mapper-annotated-text/src/test/java/org/elasticsearch/search/highlight/AnnotatedTextHighlighterTests.java b/plugins/mapper-annotated-text/src/test/java/org/elasticsearch/search/highlight/AnnotatedTextHighlighterTests.java index ca29521802fe2..bdc22392444c9 100644 --- a/plugins/mapper-annotated-text/src/test/java/org/elasticsearch/search/highlight/AnnotatedTextHighlighterTests.java +++ b/plugins/mapper-annotated-text/src/test/java/org/elasticsearch/search/highlight/AnnotatedTextHighlighterTests.java @@ -65,10 +65,10 @@ private void assertHighlightOneDoc(String fieldName, String []markedUpInputs, // Annotated fields wrap the usual analyzer with one that injects extra 
tokens Analyzer wrapperAnalyzer = new AnnotationAnalyzerWrapper(new StandardAnalyzer()); - AnnotatedHighlighterAnalyzer hiliteAnalyzer = new AnnotatedHighlighterAnalyzer(wrapperAnalyzer); - hiliteAnalyzer.init(markedUpInputs); - PassageFormatter passageFormatter = new AnnotatedPassageFormatter(hiliteAnalyzer,new DefaultEncoder()); - String []plainTextForHighlighter = hiliteAnalyzer.getPlainTextValuesForHighlighter(); + AnnotatedHighlighterAnalyzer highlightAnalyzer = new AnnotatedHighlighterAnalyzer(wrapperAnalyzer); + highlightAnalyzer.init(markedUpInputs); + PassageFormatter passageFormatter = new AnnotatedPassageFormatter(highlightAnalyzer,new DefaultEncoder()); + String []plainTextForHighlighter = highlightAnalyzer.getPlainTextValuesForHighlighter(); Directory dir = newDirectory(); @@ -96,7 +96,7 @@ private void assertHighlightOneDoc(String fieldName, String []markedUpInputs, assertThat(topDocs.totalHits.value, equalTo(1L)); String rawValue = Strings.arrayToDelimitedString(plainTextForHighlighter, String.valueOf(MULTIVAL_SEP_CHAR)); - CustomUnifiedHighlighter highlighter = new CustomUnifiedHighlighter(searcher, hiliteAnalyzer, null, + CustomUnifiedHighlighter highlighter = new CustomUnifiedHighlighter(searcher, highlightAnalyzer, null, passageFormatter, locale, breakIterator, rawValue, noMatchSize); highlighter.setFieldMatcher((name) -> "text".equals(name)); diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AwsS3ServiceImplTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AwsS3ServiceImplTests.java index 0c14f44d8b613..3e3ac79ed402d 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AwsS3ServiceImplTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AwsS3ServiceImplTests.java @@ -38,8 +38,8 @@ public class AwsS3ServiceImplTests extends ESTestCase { public void testAWSCredentialsDefaultToInstanceProviders() { - final String 
inexistentClientName = randomAlphaOfLength(8).toLowerCase(Locale.ROOT); - final S3ClientSettings clientSettings = S3ClientSettings.getClientSettings(Settings.EMPTY, inexistentClientName); + final String nonexistentClientName = randomAlphaOfLength(8).toLowerCase(Locale.ROOT); + final S3ClientSettings clientSettings = S3ClientSettings.getClientSettings(Settings.EMPTY, nonexistentClientName); final AWSCredentialsProvider credentialsProvider = S3Service.buildCredentials(logger, clientSettings); assertThat(credentialsProvider, instanceOf(S3Service.PrivilegedInstanceProfileCredentialsProvider.class)); } diff --git a/plugins/transport-nio/src/test/java/org/elasticsearch/transport/nio/NioTransportLoggingIT.java b/plugins/transport-nio/src/test/java/org/elasticsearch/transport/nio/NioTransportLoggingIT.java index 826bfd6585f42..fac509a0e868a 100644 --- a/plugins/transport-nio/src/test/java/org/elasticsearch/transport/nio/NioTransportLoggingIT.java +++ b/plugins/transport-nio/src/test/java/org/elasticsearch/transport/nio/NioTransportLoggingIT.java @@ -57,7 +57,7 @@ public void testLoggingHandler() throws IllegalAccessException { ", action: cluster:monitor/nodes/hot_threads\\[n\\]\\]" + " WRITE: \\d+B"; final MockLogAppender.LoggingExpectation writeExpectation = - new MockLogAppender.PatternSeenEventExcpectation( + new MockLogAppender.PatternSeenEventExpectation( "hot threads request", TransportLogger.class.getCanonicalName(), Level.TRACE, writePattern); final String readPattern = @@ -69,7 +69,7 @@ public void testLoggingHandler() throws IllegalAccessException { " READ: \\d+B"; final MockLogAppender.LoggingExpectation readExpectation = - new MockLogAppender.PatternSeenEventExcpectation( + new MockLogAppender.PatternSeenEventExpectation( "hot threads request", TransportLogger.class.getCanonicalName(), Level.TRACE, readPattern); appender.addExpectation(writeExpectation); diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Cleanup.java 
b/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Cleanup.java index fda61e9fb36e5..e07f8ef3c7ab4 100644 --- a/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Cleanup.java +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Cleanup.java @@ -88,7 +88,7 @@ public static void cleanEverything() { .forEach(FileUtils::rm); // disable elasticsearch service - // todo add this for windows when adding tests for service intallation + // todo add this for windows when adding tests for service installation if (Platforms.LINUX && isSystemd()) { sh.run("systemctl unmask systemd-sysctl.service"); } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query.json b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query.json index dfdc00680828f..feb0fd6f17c6b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query.json @@ -141,7 +141,7 @@ }, "scroll_size": { "type": "number", - "defaut_value": 100, + "default_value": 100, "description": "Size on the scroll request powering the delete by query" }, "wait_for_completion": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.flush.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.flush.json index 77d9e037163fa..b6fe82259bb75 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.flush.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.flush.json @@ -14,7 +14,7 @@ "params": { "force": { "type" : "boolean", - "description" : "Whether a flush should be forced even if it is not necessarily needed ie. if no changes will be committed to the index. This is useful if transaction log IDs should be incremented even if no uncommitted changes are present. (This setting can be considered as internal)" + "description" : "Whether a flush should be forced even if it is not necessarily needed i.e. 
if no changes will be committed to the index. This is useful if transaction log IDs should be incremented even if no uncommitted changes are present. (This setting can be considered as internal)" }, "wait_if_ongoing": { "type" : "boolean", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/msearch.json b/rest-api-spec/src/main/resources/rest-api-spec/api/msearch.json index 13a6005c9a189..0bd08cf54f46c 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/msearch.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/msearch.json @@ -31,7 +31,7 @@ }, "pre_filter_shard_size" : { "type" : "number", - "description" : "A threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on it's rewrite method ie. if date filters are mandatory to match but the shard bounds and the query are disjoint.", + "description" : "A threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on it's rewrite method i.e. 
if date filters are mandatory to match but the shard bounds and the query are disjoint.", "default" : 128 }, "max_concurrent_shard_requests" : { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/search.json b/rest-api-spec/src/main/resources/rest-api-spec/api/search.json index 5834ca623a99b..a65e5647b3f39 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/search.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/search.json @@ -180,7 +180,7 @@ }, "pre_filter_shard_size" : { "type" : "number", - "description" : "A threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on it's rewrite method ie. if date filters are mandatory to match but the shard bounds and the query are disjoint.", + "description" : "A threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on it's rewrite method i.e. 
if date filters are mandatory to match but the shard bounds and the query are disjoint.", "default" : 128 }, "rest_total_hits_as_int" : { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query.json b/rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query.json index 427a7e04ad8fb..ac63cb76ead6e 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query.json @@ -149,7 +149,7 @@ }, "scroll_size": { "type": "number", - "defaut_value": 100, + "default_value": 100, "description": "Size on the scroll request powering the update by query" }, "wait_for_completion": { diff --git a/server/build.gradle b/server/build.gradle index c3a8958f3d8a8..a110f522c1380 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -107,7 +107,7 @@ dependencies { // percentiles aggregation compile 'com.tdunning:t-digest:3.2' - // precentil ranks aggregation + // percentile ranks aggregation compile 'org.hdrhistogram:HdrHistogram:2.1.9' // lucene spatial diff --git a/server/src/main/java/org/apache/lucene/geo/XTessellator.java b/server/src/main/java/org/apache/lucene/geo/XTessellator.java index 48091439ba98f..88e43ea8e897c 100644 --- a/server/src/main/java/org/apache/lucene/geo/XTessellator.java +++ b/server/src/main/java/org/apache/lucene/geo/XTessellator.java @@ -435,7 +435,7 @@ && isLocallyInside(a, b) && isLocallyInside(b, a)) { return node; } - /** Attempt to split a polygon and independently triangulate each side. Return true if the polygon was splitted **/ + /** Attempt to split a polygon and independently triangulate each side. Return true if the polygon was split **/ private static boolean splitEarcut(final Node start, final List tessellation, final boolean mortonIndexed) { // Search for a valid diagonal that divides the polygon into two. 
Node searchNode = start; @@ -633,7 +633,7 @@ private static void tathamSort(Node list) { } while (numMerges > 1); } - /** Eliminate colinear/duplicate points from the doubly linked list */ + /** Eliminate collinear/duplicate points from the doubly linked list */ private static Node filterPoints(final Node start, Node end) { if (start == null) { return start; diff --git a/server/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java b/server/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java index dd3ac992475b9..2449c37e439aa 100644 --- a/server/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java +++ b/server/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java @@ -47,7 +47,7 @@ * significant in other fields like in a scenario where documents represent * users with a "first_name" and a "second_name". When someone searches * for "simon" it will very likely get "paul simon" first since "simon" is a - * an uncommon last name ie. has a low document frequency. This query + * an uncommon last name i.e. has a low document frequency. This query * tries to "lie" about the global statistics like document frequency as well * total term frequency to rank based on the estimated statistics. *

    @@ -110,7 +110,7 @@ protected void blend(final TermStates[] contexts, int maxDoc, IndexReader reader // we use the max here since it's the only "true" estimation we can make here // at least max(df) documents have that term. Sum or Averages don't seem // to have a significant meaning here. - // TODO: Maybe it could also make sense to assume independent distributions of documents and eg. have: + // TODO: Maybe it could also make sense to assume independent distributions of documents and e.g. have: // df = df1 + df2 - (df1 * df2 / maxDoc)? max = Math.max(df, max); if (minSumTTF != -1 && ctx.totalTermFreq() != -1) { diff --git a/server/src/main/java/org/apache/lucene/search/grouping/CollapsingTopDocsCollector.java b/server/src/main/java/org/apache/lucene/search/grouping/CollapsingTopDocsCollector.java index e28d8990c91e3..d0d0e0f2c5373 100644 --- a/server/src/main/java/org/apache/lucene/search/grouping/CollapsingTopDocsCollector.java +++ b/server/src/main/java/org/apache/lucene/search/grouping/CollapsingTopDocsCollector.java @@ -37,7 +37,7 @@ * output. The collapsing is done in a single pass by selecting only the top sorted document per collapse key. * The value used for the collapse key of each group can be found in {@link CollapseTopFieldDocs#collapseValues}. * - * TODO: If the sort is based on score we should propagate the mininum competitive score when orderedGroups is full. + * TODO: If the sort is based on score we should propagate the minimum competitive score when orderedGroups is full. * This is safe for collapsing since the group sort is the same as the query sort. 
*/ public final class CollapsingTopDocsCollector extends FirstPassGroupingCollector { diff --git a/server/src/main/java/org/elasticsearch/ElasticsearchException.java b/server/src/main/java/org/elasticsearch/ElasticsearchException.java index d18d4d4820f7d..fb5e9be123fe7 100644 --- a/server/src/main/java/org/elasticsearch/ElasticsearchException.java +++ b/server/src/main/java/org/elasticsearch/ElasticsearchException.java @@ -626,7 +626,7 @@ public static ElasticsearchException[] guessRootCauses(Throwable t) { * parsing exception because that is generally the most interesting * exception to return to the user. If that exception is caused by * an ElasticsearchException we'd like to keep unwrapping because - * ElasticserachExceptions tend to contain useful information for + * ElasticsearchExceptions tend to contain useful information for * the user. */ Throwable cause = ex.getCause(); diff --git a/server/src/main/java/org/elasticsearch/SpecialPermission.java b/server/src/main/java/org/elasticsearch/SpecialPermission.java index 9e5571a5b0af9..1782d3c1dc1b2 100644 --- a/server/src/main/java/org/elasticsearch/SpecialPermission.java +++ b/server/src/main/java/org/elasticsearch/SpecialPermission.java @@ -61,7 +61,7 @@ public final class SpecialPermission extends BasicPermission { public static final SpecialPermission INSTANCE = new SpecialPermission(); /** - * Creates a new SpecialPermision object. + * Creates a new SpecialPermission object. */ public SpecialPermission() { // TODO: if we really need we can break out name (e.g. "hack" or "scriptEngineService" or whatever). 
diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index 8f4d799713b09..adc25c1d9b3ae 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -410,7 +410,7 @@ public Version minimumCompatibilityVersion() { /** * Returns the minimum created index version that this version supports. Indices created with lower versions - * can't be used with this version. This should also be used for file based serialization backwards compatibility ie. on serialization + * can't be used with this version. This should also be used for file based serialization backwards compatibility i.e. on serialization * code that is used to read / write file formats like transaction logs, cluster state, and index metadata. */ public Version minimumIndexCompatibilityVersion() { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStatus.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStatus.java index 834e238e4a0d3..5159f334250a6 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStatus.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStatus.java @@ -189,7 +189,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws stage = SnapshotIndexShardStage.valueOf(rawStage); } catch (IllegalArgumentException iae) { throw new ElasticsearchParseException( - "failed to parse snapshot index shard status [{}][{}], unknonwn stage [{}]", + "failed to parse snapshot index shard status [{}][{}], unknown stage [{}]", shard.getIndex().getName(), shard.getId(), rawStage); } return new SnapshotIndexShardStatus(shard, stage, stats, nodeId, failure); diff --git 
a/server/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/IndicesExistsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/IndicesExistsRequest.java index 0c99175387db0..644154b2f349c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/IndicesExistsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/IndicesExistsRequest.java @@ -74,13 +74,13 @@ public IndicesOptions indicesOptions() { return indicesOptions; } - public IndicesExistsRequest expandWilcardsOpen(boolean expandWildcardsOpen) { + public IndicesExistsRequest expandWildcardsOpen(boolean expandWildcardsOpen) { this.indicesOptions = IndicesOptions.fromOptions(indicesOptions.ignoreUnavailable(), indicesOptions.allowNoIndices(), expandWildcardsOpen, indicesOptions.expandWildcardsClosed()); return this; } - public IndicesExistsRequest expandWilcardsClosed(boolean expandWildcardsClosed) { + public IndicesExistsRequest expandWildcardsClosed(boolean expandWildcardsClosed) { this.indicesOptions = IndicesOptions.fromOptions(indicesOptions.ignoreUnavailable(), indicesOptions.allowNoIndices(), indicesOptions.expandWildcardsOpen(), expandWildcardsClosed); return this; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/IndicesExistsRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/IndicesExistsRequestBuilder.java index 39a2ca7ef0a13..f5cd87b3b9ac4 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/IndicesExistsRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/IndicesExistsRequestBuilder.java @@ -38,7 +38,7 @@ public IndicesExistsRequestBuilder setIndices(String... 
indices) { * Controls whether wildcard expressions will be expanded to existing open indices */ public IndicesExistsRequestBuilder setExpandWildcardsOpen(boolean expandWildcardsOpen) { - request.expandWilcardsOpen(expandWildcardsOpen); + request.expandWildcardsOpen(expandWildcardsOpen); return this; } @@ -46,7 +46,7 @@ public IndicesExistsRequestBuilder setExpandWildcardsOpen(boolean expandWildcard * Controls whether wildcard expressions will be expanded to existing closed indices */ public IndicesExistsRequestBuilder setExpandWildcardsClosed(boolean expandWildcardsClosed) { - request.expandWilcardsClosed(expandWildcardsClosed); + request.expandWildcardsClosed(expandWildcardsClosed); return this; } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequestBuilder.java index c10df6aafa557..f1c22720af9cb 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequestBuilder.java @@ -102,7 +102,7 @@ public PutIndexTemplateRequestBuilder setSettings(Settings.Builder settings) { } /** - * The settings to crete the index template with (either json or yaml format) + * The settings to create the index template with (either json or yaml format) */ public PutIndexTemplateRequestBuilder setSettings(String source, XContentType xContentType) { request.settings(source, xContentType); @@ -110,7 +110,7 @@ public PutIndexTemplateRequestBuilder setSettings(String source, XContentType xC } /** - * The settings to crete the index template with (either json or yaml format) + * The settings to create the index template with (either json or yaml format) */ public PutIndexTemplateRequestBuilder setSettings(Map source) { request.settings(source); diff --git 
a/server/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java index efb08a01e43ab..c197175472b79 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java @@ -115,7 +115,7 @@ public String getDescription() { public void onRetry() { for (BulkItemRequest item : items) { if (item.request() instanceof ReplicationRequest) { - // all replication requests need to be notified here as well to ie. make sure that internal optimizations are + // all replication requests need to be notified here as well to i.e. make sure that internal optimizations are // disabled see IndexRequest#canHaveDuplicates() ((ReplicationRequest) item.request()).onRetry(); } diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilities.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilities.java index 5cfdba9294634..8db7ac9709fab 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilities.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilities.java @@ -243,19 +243,19 @@ static class Builder { private String type; private boolean isSearchable; private boolean isAggregatable; - private List indiceList; + private List indicesList; Builder(String name, String type) { this.name = name; this.type = type; this.isSearchable = true; this.isAggregatable = true; - this.indiceList = new ArrayList<>(); + this.indicesList = new ArrayList<>(); } void add(String index, boolean search, boolean agg) { IndexCaps indexCaps = new IndexCaps(index, search, agg); - indiceList.add(indexCaps); + indicesList.add(indexCaps); this.isSearchable &= search; this.isAggregatable &= agg; } @@ -264,9 +264,9 @@ FieldCapabilities build(boolean withIndices) { final String[] indices; /* Eclipse can't deal with o -> o.name, maybe because of * 
https://bugs.eclipse.org/bugs/show_bug.cgi?id=511750 */ - Collections.sort(indiceList, Comparator.comparing((IndexCaps o) -> o.name)); + Collections.sort(indicesList, Comparator.comparing((IndexCaps o) -> o.name)); if (withIndices) { - indices = indiceList.stream() + indices = indicesList.stream() .map(caps -> caps.name) .toArray(String[]::new); } else { @@ -275,10 +275,10 @@ FieldCapabilities build(boolean withIndices) { final String[] nonSearchableIndices; if (isSearchable == false && - indiceList.stream().anyMatch((caps) -> caps.isSearchable)) { + indicesList.stream().anyMatch((caps) -> caps.isSearchable)) { // Iff this field is searchable in some indices AND non-searchable in others // we record the list of non-searchable indices - nonSearchableIndices = indiceList.stream() + nonSearchableIndices = indicesList.stream() .filter((caps) -> caps.isSearchable == false) .map(caps -> caps.name) .toArray(String[]::new); @@ -288,10 +288,10 @@ FieldCapabilities build(boolean withIndices) { final String[] nonAggregatableIndices; if (isAggregatable == false && - indiceList.stream().anyMatch((caps) -> caps.isAggregatable)) { + indicesList.stream().anyMatch((caps) -> caps.isAggregatable)) { // Iff this field is aggregatable in some indices AND non-searchable in others // we keep the list of non-aggregatable indices - nonAggregatableIndices = indiceList.stream() + nonAggregatableIndices = indicesList.stream() .filter((caps) -> caps.isAggregatable == false) .map(caps -> caps.name) .toArray(String[]::new); diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequest.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequest.java index e9e77df5f9030..3204685046e94 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequest.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequest.java @@ -44,7 +44,7 @@ public final class FieldCapabilitiesRequest 
extends ActionRequest implements Ind private String[] indices = Strings.EMPTY_ARRAY; private IndicesOptions indicesOptions = IndicesOptions.strictExpandOpen(); private String[] fields = Strings.EMPTY_ARRAY; - // pkg private API mainly for cross cluster search to signal that we do multiple reductions ie. the results should not be merged + // pkg private API mainly for cross cluster search to signal that we do multiple reductions i.e. the results should not be merged private boolean mergeResults = true; private static ObjectParser PARSER = diff --git a/server/src/main/java/org/elasticsearch/action/search/InitialSearchPhase.java b/server/src/main/java/org/elasticsearch/action/search/InitialSearchPhase.java index 30ad0529f5c34..cb5d22a9eed95 100644 --- a/server/src/main/java/org/elasticsearch/action/search/InitialSearchPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/InitialSearchPhase.java @@ -304,7 +304,7 @@ private void onShardResult(FirstResult result, SearchShardIterator shardIt) { onShardSuccess(result); // we need to increment successful ops first before we compare the exit condition otherwise if we // are fast we could concurrently update totalOps but then preempt one of the threads which can - // cause the successor to read a wrong value from successfulOps if second phase is very fast ie. count etc. + // cause the successor to read a wrong value from successfulOps if second phase is very fast i.e. count etc. // increment all the "future" shards to update the total ops since we some may work and some may not... 
// and when that happens, we break on total ops, so we must maintain them successfulShardExecution(shardIt); diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java b/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java index 9789e03c83641..ceda914dfbf57 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java @@ -478,7 +478,7 @@ public void setMaxConcurrentShardRequests(int maxConcurrentShardRequests) { /** * Sets a threshold that enforces a pre-filter roundtrip to pre-filter search shards based on query rewriting if the number of shards * the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for - * instance a shard can not match any documents based on it's rewrite method ie. if date filters are mandatory to match but the shard + * instance a shard can not match any documents based on its rewrite method i.e. if date filters are mandatory to match but the shard * bounds and the query are disjoint. The default is {@code 128} */ public void setPreFilterShardSize(int preFilterShardSize) { @@ -491,7 +491,7 @@ public void setPreFilterShardSize(int preFilterShardSize) { /** * Returns a threshold that enforces a pre-filter roundtrip to pre-filter search shards based on query rewriting if the number of shards * the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for - * instance a shard can not match any documents based on it's rewrite method ie. if date filters are mandatory to match but the shard + * instance a shard can not match any documents based on its rewrite method i.e. if date filters are mandatory to match but the shard * bounds and the query are disjoint. 
The default is {@code 128} */ public int getPreFilterShardSize() { diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java index 732debf2a1305..e9d56071b8533 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java @@ -550,7 +550,7 @@ public SearchRequestBuilder setMaxConcurrentShardRequests(int maxConcurrentShard /** * Sets a threshold that enforces a pre-filter roundtrip to pre-filter search shards based on query rewriting if the number of shards * the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for - * instance a shard can not match any documents based on it's rewrite method ie. if date filters are mandatory to match but the shard + * instance a shard can not match any documents based on its rewrite method i.e. if date filters are mandatory to match but the shard * bounds and the query are disjoint. The default is {@code 128} */ public SearchRequestBuilder setPreFilterShardSize(int preFilterShardSize) { diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchScrollAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/SearchScrollAsyncAction.java index 81d7e66d19edb..66f13cf46160c 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchScrollAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchScrollAsyncAction.java @@ -243,7 +243,7 @@ protected final void sendResponse(SearchPhaseController.ReducedQueryPhase queryP final InternalSearchResponse internalResponse = searchPhaseController.merge(true, queryPhase, fetchResults.asList(), fetchResults::get); // the scroll ID never changes we always return the same ID. 
This ID contains all the shards and their context ids - // such that we can talk to them abgain in the next roundtrip. + // such that we can talk to them again in the next roundtrip. String scrollId = null; if (request.scroll() != null) { scrollId = request.scrollId(); diff --git a/server/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeAction.java b/server/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeAction.java index 679bad1642e53..5d32d1e867cab 100644 --- a/server/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeAction.java @@ -104,7 +104,7 @@ protected TransportMasterNodeAction(String actionName, boolean canTripCircuitBre protected abstract String executor(); /** - * @deprecated new implementors should override {@link #read(StreamInput)} and use the + * @deprecated new implementers should override {@link #read(StreamInput)} and use the * {@link Writeable.Reader} interface. * @return a new response instance. Typically this is used for serialization using the * {@link Streamable#readFrom(StreamInput)} method. diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsFields.java b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsFields.java index 7d13cff2ebd09..62f72a3774582 100644 --- a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsFields.java +++ b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsFields.java @@ -57,7 +57,7 @@ *

  • String : "TV"
  • *
  • vint: version (=-1)
  • *
  • boolean: hasTermStatistics (are the term statistics stored?)
  • - *
  • boolean: hasFieldStatitsics (are the field statistics stored?)
  • + *
  • boolean: hasFieldStatistics (are the field statistics stored?)
  • *
  • vint: number of fields
  • *
  • *
      @@ -112,8 +112,8 @@ *
    • vint: endOffset_1 (if offset)
    • *
    • BytesRef: payload_1 (if payloads)
    • *
    • ...
    • - *
    • vint: endOffset_freqency (if offset)
    • - *
    • BytesRef: payload_freqency (if payloads)
    • + *
    • vint: endOffset_frequency (if offset)
    • + *
    • BytesRef: payload_frequency (if payloads)
    • *
  • * */ @@ -457,7 +457,7 @@ public int startOffset() throws IOException { } @Override - // can return -1 if posistions were not requested or + // can return -1 if positions were not requested or // stored but offsets were stored and requested public int nextPosition() throws IOException { assert curPos + 1 < freq; diff --git a/server/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java b/server/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java index eb53cbaef70ba..59623e90d3526 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java @@ -90,7 +90,7 @@ static void check(final BootstrapContext context, final BoundTransportAddress bo * property {@code es.enforce.bootstrap.checks} is set to {@code true} then the bootstrap checks will be enforced regardless of whether * or not the transport protocol is bound to a non-loopback interface. * - * @param context the current node boostrap context + * @param context the current node bootstrap context * @param enforceLimits {@code true} if the checks should be enforced or otherwise warned * @param checks the checks to execute */ @@ -106,7 +106,7 @@ static void check( * property {@code es.enforce.bootstrap.checks }is set to {@code true} then the bootstrap checks will be enforced regardless of whether * or not the transport protocol is bound to a non-loopback interface. 
* - * @param context the current node boostrap context + * @param context the current node bootstrap context * @param enforceLimits {@code true} if the checks should be enforced or otherwise warned * @param checks the checks to execute * @param logger the logger to diff --git a/server/src/main/java/org/elasticsearch/cli/EnvironmentAwareCommand.java b/server/src/main/java/org/elasticsearch/cli/EnvironmentAwareCommand.java index 6fc3349c76233..420cebdc1501c 100644 --- a/server/src/main/java/org/elasticsearch/cli/EnvironmentAwareCommand.java +++ b/server/src/main/java/org/elasticsearch/cli/EnvironmentAwareCommand.java @@ -86,7 +86,7 @@ protected void execute(Terminal terminal, OptionSet options) throws Exception { execute(terminal, options, createEnv(settings)); } - /** Create an {@link Environment} for the command to use. Overrideable for tests. */ + /** Create an {@link Environment} for the command to use. Overridable for tests. */ protected Environment createEnv(final Map settings) throws UserException { final String esPathConf = System.getProperty("es.path.conf"); if (esPathConf == null) { diff --git a/server/src/main/java/org/elasticsearch/client/FilterClient.java b/server/src/main/java/org/elasticsearch/client/FilterClient.java index b4230710414be..e32a825eae082 100644 --- a/server/src/main/java/org/elasticsearch/client/FilterClient.java +++ b/server/src/main/java/org/elasticsearch/client/FilterClient.java @@ -47,7 +47,7 @@ public FilterClient(Client in) { /** * A Constructor that allows to pass settings and threadpool separately. This is useful if the - * client is a proxy and not yet fully constructed ie. both dependencies are not available yet. + * client is a proxy and not yet fully constructed i.e. both dependencies are not available yet. 
*/ protected FilterClient(Settings settings, ThreadPool threadPool, Client in) { super(settings, threadPool); diff --git a/server/src/main/java/org/elasticsearch/client/transport/TransportClient.java b/server/src/main/java/org/elasticsearch/client/transport/TransportClient.java index b5720c023f095..c6202d7b1e72f 100644 --- a/server/src/main/java/org/elasticsearch/client/transport/TransportClient.java +++ b/server/src/main/java/org/elasticsearch/client/transport/TransportClient.java @@ -127,7 +127,7 @@ protected static Collection> addPlugins(Collection> plugins, HostFailureListener failureListner) { + Collection> plugins, HostFailureListener failureListener) { if (Node.NODE_NAME_SETTING.exists(providedSettings) == false) { providedSettings = Settings.builder().put(providedSettings).put(Node.NODE_NAME_SETTING.getKey(), "_client_").build(); } @@ -206,8 +206,8 @@ private static ClientTemplate buildTemplate(Settings providedSettings, Settings Injector injector = modules.createInjector(); final TransportClientNodesService nodesService = - new TransportClientNodesService(settings, transportService, threadPool, failureListner == null - ? (t, e) -> {} : failureListner); + new TransportClientNodesService(settings, transportService, threadPool, failureListener == null + ? (t, e) -> {} : failureListener); // construct the list of client actions final List actionPlugins = pluginsService.filterPlugins(ActionPlugin.class); diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterChangedEvent.java b/server/src/main/java/org/elasticsearch/cluster/ClusterChangedEvent.java index 701656db9ce8f..2a0998f1f173e 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterChangedEvent.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterChangedEvent.java @@ -195,7 +195,7 @@ public boolean blocksChanged() { } /** - * Returns true iff the local node is the mater node of the cluster. + * Returns true iff the local node is the master node of the cluster. 
*/ public boolean localNodeMaster() { return state.nodes().isLocalNodeElectedMaster(); diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterState.java b/server/src/main/java/org/elasticsearch/cluster/ClusterState.java index 14493e276ed48..17f2cd66a911f 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterState.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterState.java @@ -97,7 +97,7 @@ public class ClusterState implements ToXContentFragment, Diffable public static final ClusterState EMPTY_STATE = builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).build(); /** - * An interface that implementors use when a class requires a client to maybe have a feature. + * An interface that implementers use when a class requires a client to maybe have a feature. */ public interface FeatureAware { diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java b/server/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java index bdbbbf0fa0ccd..5258fc6179708 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java @@ -55,7 +55,7 @@ default void clusterStatePublished(ClusterChangedEvent clusterChangedEvent) { * * Note that the tasks given are not necessarily the same as those that will be passed to {@link #execute(ClusterState, List)}. * but are guaranteed to be a subset of them. This method can be called multiple times with different lists before execution. - * This allows groupd task description but the submitting source. + * This allows grouped task description by the submitting source. 
*/ default String describeTasks(List tasks) { return String.join(", ", tasks.stream().map(t -> (CharSequence)t.toString()).filter(t -> t.length() > 0)::iterator); diff --git a/server/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java b/server/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java index e8261ca9f09cf..74e9c30d8f138 100644 --- a/server/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java +++ b/server/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java @@ -288,11 +288,11 @@ public final ClusterInfo refresh() { final CountDownLatch nodeLatch = updateNodeStats(new ActionListener() { @Override public void onResponse(NodesStatsResponse nodeStatses) { - ImmutableOpenMap.Builder newLeastAvaiableUsages = ImmutableOpenMap.builder(); - ImmutableOpenMap.Builder newMostAvaiableUsages = ImmutableOpenMap.builder(); - fillDiskUsagePerNode(logger, nodeStatses.getNodes(), newLeastAvaiableUsages, newMostAvaiableUsages); - leastAvailableSpaceUsages = newLeastAvaiableUsages.build(); - mostAvailableSpaceUsages = newMostAvaiableUsages.build(); + ImmutableOpenMap.Builder newLeastAvailableUsages = ImmutableOpenMap.builder(); + ImmutableOpenMap.Builder newMostAvailableUsages = ImmutableOpenMap.builder(); + fillDiskUsagePerNode(logger, nodeStatses.getNodes(), newLeastAvailableUsages, newMostAvailableUsages); + leastAvailableSpaceUsages = newLeastAvailableUsages.build(); + mostAvailableSpaceUsages = newMostAvailableUsages.build(); } @Override @@ -382,8 +382,8 @@ static void buildShardLevelInfo(Logger logger, ShardStats[] stats, ImmutableOpen } static void fillDiskUsagePerNode(Logger logger, List nodeStatsArray, - ImmutableOpenMap.Builder newLeastAvaiableUsages, - ImmutableOpenMap.Builder newMostAvaiableUsages) { + ImmutableOpenMap.Builder newLeastAvailableUsages, + ImmutableOpenMap.Builder newMostAvailableUsages) { for (NodeStats nodeStats : nodeStatsArray) { if (nodeStats.getFs() == null) { 
logger.warn("Unable to retrieve node FS stats for {}", nodeStats.getNode().getName()); @@ -414,7 +414,7 @@ static void fillDiskUsagePerNode(Logger logger, List nodeStatsArray, nodeId, leastAvailablePath.getTotal().getBytes()); } } else { - newLeastAvaiableUsages.put(nodeId, new DiskUsage(nodeId, nodeName, leastAvailablePath.getPath(), + newLeastAvailableUsages.put(nodeId, new DiskUsage(nodeId, nodeName, leastAvailablePath.getPath(), leastAvailablePath.getTotal().getBytes(), leastAvailablePath.getAvailable().getBytes())); } if (mostAvailablePath.getTotal().getBytes() < 0) { @@ -423,7 +423,7 @@ static void fillDiskUsagePerNode(Logger logger, List nodeStatsArray, nodeId, mostAvailablePath.getTotal().getBytes()); } } else { - newMostAvaiableUsages.put(nodeId, new DiskUsage(nodeId, nodeName, mostAvailablePath.getPath(), + newMostAvailableUsages.put(nodeId, new DiskUsage(nodeId, nodeName, mostAvailablePath.getPath(), mostAvailablePath.getTotal().getBytes(), mostAvailablePath.getAvailable().getBytes())); } diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java index 36871799eddf7..74752e00b29e9 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java @@ -328,7 +328,7 @@ private void updateMaxTermSeen(final long term) { // Bump our term. However if there is a publication in flight then doing so would cancel the publication, so don't do that // since we check whether a term bump is needed at the end of the publication too. 
if (publicationInProgress()) { - logger.debug("updateMaxTermSeen: maxTermSeen = {} > currentTerm = {}, enqueueing term bump", + logger.debug("updateMaxTermSeen: maxTermSeen = {} > currentTerm = {}, enqueuing term bump", maxTermSeen, currentTerm); } else { try { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java index d9120342cf4cd..dd75eae5fe5ef 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java @@ -722,7 +722,7 @@ static IndexMetaData validateResize(ClusterState state, String sourceIndex, } if (IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.exists(targetIndexSettings)) { - // this method applies all necessary checks ie. if the target shards are less than the source shards + // this method applies all necessary checks i.e. if the target shards are less than the source shards // of if the source shards are divisible by the number of target shards IndexMetaData.getRoutingFactor(sourceMetaData.getNumberOfShards(), IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.get(targetIndexSettings)); @@ -797,7 +797,7 @@ static void prepareResizeIndexSettings( public static int calculateNumRoutingShards(int numShards, Version indexVersionCreated) { if (indexVersionCreated.onOrAfter(Version.V_7_0_0)) { // only select this automatically for indices that are created on or after 7.0 this will prevent this new behaviour - // until we have a fully upgraded cluster. Additionally it will make integratin testing easier since mixed clusters + // until we have a fully upgraded cluster. Additionally it will make integration testing easier since mixed clusters // will always have the behavior of the min node in the cluster. 
// // We use as a default number of routing shards the higher number that can be expressed diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java b/server/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java index bfc4ce0618833..0c79b3e8f2490 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java @@ -178,7 +178,7 @@ public boolean relocating() { } /** - * Returns true iff this shard is assigned to a node ie. not + * Returns true iff this shard is assigned to a node i.e. not * {@link ShardRoutingState#UNASSIGNED unassigned}. Otherwise false */ public boolean assignedToNode() { diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java index ad5db788a8046..26e0c06e7e1b8 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java @@ -298,7 +298,7 @@ private static float absDelta(float lower, float higher) { private static boolean lessThan(float delta, float threshold) { /* deltas close to the threshold are "rounded" to the threshold manually to prevent floating point problems if the delta is very close to the - threshold ie. 1.000000002 which can trigger unnecessary balance actions*/ + threshold i.e. 1.000000002 which can trigger unnecessary balance actions*/ return delta <= (threshold + 0.001f); } @@ -913,7 +913,7 @@ private AllocateUnassignedDecision decideAllocateUnassigned(final ShardRouting s if (currentWeight == minWeight) { /* we have an equal weight tie breaking: * 1. if one decision is YES prefer it - * 2. 
prefer the node that holds the primary for this index with the next id in the ring ie. + * 2. prefer the node that holds the primary for this index with the next id in the ring i.e. * for the 3 shards 2 replica case we try to build up: * 1 2 0 * 2 0 1 diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDecider.java index 053d696f6768c..fd980b53c2eaa 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDecider.java @@ -108,7 +108,7 @@ public FilterAllocationDecider(Settings settings, ClusterSettings clusterSetting @Override public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) { if (shardRouting.unassigned()) { - // only for unassigned - we filter allocation right after the index creation ie. for shard shrinking etc. to ensure + // only for unassigned - we filter allocation right after the index creation i.e. for shard shrinking etc. to ensure // that once it has been allocated post API the replicas can be allocated elsewhere without user interaction // this is a setting that can only be set within the system! 
IndexMetaData indexMd = allocation.metaData().getIndexSafe(shardRouting.index()); diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ResizeAllocationDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ResizeAllocationDecider.java index d58a625c6edc3..69216962cb784 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ResizeAllocationDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ResizeAllocationDecider.java @@ -45,7 +45,7 @@ public Decision canAllocate(ShardRouting shardRouting, RoutingAllocation allocat public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) { final UnassignedInfo unassignedInfo = shardRouting.unassignedInfo(); if (unassignedInfo != null && shardRouting.recoverySource().getType() == RecoverySource.Type.LOCAL_SHARDS) { - // we only make decisions here if we have an unassigned info and we have to recover from another index ie. split / shrink + // we only make decisions here if we have an unassigned info and we have to recover from another index i.e. 
split / shrink final IndexMetaData indexMetaData = allocation.metaData().getIndexSafe(shardRouting.index()); Index resizeSourceIndex = indexMetaData.getResizeSourceIndex(); assert resizeSourceIndex != null; diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java index 1c0a0c0ef0a6b..2003dd4014d68 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java @@ -44,7 +44,7 @@ * {@link ShardRoutingState#RELOCATING relocating} state are ignored by this * {@link AllocationDecider} until the shard changed its state to either * {@link ShardRoutingState#STARTED started}, - * {@link ShardRoutingState#INITIALIZING inializing} or + * {@link ShardRoutingState#INITIALIZING initializing} or * {@link ShardRoutingState#UNASSIGNED unassigned} *

    * Note: Reducing the number of shards per node via the index update API can diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java index 596d3af261f17..91c155e38c8a8 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java @@ -92,19 +92,19 @@ public ThrottlingAllocationDecider(Settings settings, ClusterSettings clusterSet clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING, this::setPrimariesInitialRecoveries); clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING, - this::setConcurrentIncomingRecoverries); + this::setConcurrentIncomingRecoveries); clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_OUTGOING_RECOVERIES_SETTING, - this::setConcurrentOutgoingRecoverries); + this::setConcurrentOutgoingRecoveries); logger.debug("using node_concurrent_outgoing_recoveries [{}], node_concurrent_incoming_recoveries [{}], " + "node_initial_primaries_recoveries [{}]", concurrentOutgoingRecoveries, concurrentIncomingRecoveries, primariesInitialRecoveries); } - private void setConcurrentIncomingRecoverries(int concurrentIncomingRecoveries) { + private void setConcurrentIncomingRecoveries(int concurrentIncomingRecoveries) { this.concurrentIncomingRecoveries = concurrentIncomingRecoveries; } - private void setConcurrentOutgoingRecoverries(int concurrentOutgoingRecoveries) { + private void setConcurrentOutgoingRecoveries(int concurrentOutgoingRecoveries) { this.concurrentOutgoingRecoveries = concurrentOutgoingRecoveries; } diff --git 
a/server/src/main/java/org/elasticsearch/cluster/service/TaskBatcher.java b/server/src/main/java/org/elasticsearch/cluster/service/TaskBatcher.java index 867d4191f800f..df04c44f4e0c5 100644 --- a/server/src/main/java/org/elasticsearch/cluster/service/TaskBatcher.java +++ b/server/src/main/java/org/elasticsearch/cluster/service/TaskBatcher.java @@ -160,7 +160,7 @@ void runIfNotProcessed(BatchedTask updateTask) { /** * Represents a runnable task that supports batching. - * Implementors of TaskBatcher can subclass this to add a payload to the task. + * Implementers of TaskBatcher can subclass this to add a payload to the task. */ protected abstract class BatchedTask extends SourcePrioritizedRunnable { /** diff --git a/server/src/main/java/org/elasticsearch/common/inject/Binder.java b/server/src/main/java/org/elasticsearch/common/inject/Binder.java index 03d164bcbaa52..8d43b8f3a1fef 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/Binder.java +++ b/server/src/main/java/org/elasticsearch/common/inject/Binder.java @@ -29,7 +29,7 @@ /** * Collects configuration information (primarily bindings) which will be * used to create an {@link Injector}. Guice provides this object to your - * application's {@link Module} implementors so they may each contribute + * application's {@link Module} implementers so they may each contribute * their own bindings and other registrations. *

    The Guice Binding EDSL

    *

    diff --git a/server/src/main/java/org/elasticsearch/common/inject/Initializer.java b/server/src/main/java/org/elasticsearch/common/inject/Initializer.java index ce7d7765ce320..d0de175b23812 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/Initializer.java +++ b/server/src/main/java/org/elasticsearch/common/inject/Initializer.java @@ -76,7 +76,7 @@ public Initializable requestInjection(InjectorImpl injector, T instance, * Prepares member injectors for all injected instances. This prompts Guice to do static analysis * on the injected instances. */ - void validateOustandingInjections(Errors errors) { + void validateOutstandingInjections(Errors errors) { for (InjectableReference reference : pendingInjection.values()) { try { reference.validate(errors); diff --git a/server/src/main/java/org/elasticsearch/common/inject/InjectorBuilder.java b/server/src/main/java/org/elasticsearch/common/inject/InjectorBuilder.java index 3a8cb51fee7f4..c10d82876d7a5 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/InjectorBuilder.java +++ b/server/src/main/java/org/elasticsearch/common/inject/InjectorBuilder.java @@ -52,7 +52,7 @@ class InjectorBuilder { private Stage stage; private final Initializer initializer = new Initializer(); - private final BindingProcessor bindingProcesor; + private final BindingProcessor bindingProcessor; private final InjectionRequestProcessor injectionRequestProcessor; private final InjectorShell.Builder shellBuilder = new InjectorShell.Builder(); @@ -60,7 +60,7 @@ class InjectorBuilder { InjectorBuilder() { injectionRequestProcessor = new InjectionRequestProcessor(errors, initializer); - bindingProcesor = new BindingProcessor(errors, initializer); + bindingProcessor = new BindingProcessor(errors, initializer); } /** @@ -86,7 +86,7 @@ Injector build() { // Synchronize while we're building up the bindings and other injector state. 
This ensures that // the JIT bindings in the parent injector don't change while we're being built synchronized (shellBuilder.lock()) { - shells = shellBuilder.build(initializer, bindingProcesor, stopwatch, errors); + shells = shellBuilder.build(initializer, bindingProcessor, stopwatch, errors); stopwatch.resetAndLog("Injector construction"); initializeStatically(); @@ -101,7 +101,7 @@ Injector build() { * Initialize and validate everything. */ private void initializeStatically() { - bindingProcesor.initializeBindings(); + bindingProcessor.initializeBindings(); stopwatch.resetAndLog("Binding initialization"); for (InjectorShell shell : shells) { @@ -112,13 +112,13 @@ private void initializeStatically() { injectionRequestProcessor.process(shells); stopwatch.resetAndLog("Collecting injection requests"); - bindingProcesor.runCreationListeners(); + bindingProcessor.runCreationListeners(); stopwatch.resetAndLog("Binding validation"); injectionRequestProcessor.validate(); stopwatch.resetAndLog("Static validation"); - initializer.validateOustandingInjections(errors); + initializer.validateOutstandingInjections(errors); stopwatch.resetAndLog("Instance member validation"); new LookupProcessor(errors).process(shells); diff --git a/server/src/main/java/org/elasticsearch/common/inject/assistedinject/FactoryProvider.java b/server/src/main/java/org/elasticsearch/common/inject/assistedinject/FactoryProvider.java index 9e2d0e379867c..09dff8c82a051 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/assistedinject/FactoryProvider.java +++ b/server/src/main/java/org/elasticsearch/common/inject/assistedinject/FactoryProvider.java @@ -144,7 +144,7 @@ private FactoryProvider(TypeLiteral factoryType, private void checkDeclaredExceptionsMatch() { for (Map.Entry> entry : factoryMethodToConstructor.entrySet()) { for (Class constructorException : entry.getValue().getDeclaredExceptions()) { - if (!isConstructorExceptionCompatibleWithFactoryExeception( + if 
(!isConstructorExceptionCompatibleWithFactoryException( constructorException, entry.getKey().getExceptionTypes())) { throw newConfigurationException("Constructor %s declares an exception, but no compatible " + "exception is thrown by the factory method %s", entry.getValue(), entry.getKey()); @@ -153,7 +153,7 @@ private void checkDeclaredExceptionsMatch() { } } - private boolean isConstructorExceptionCompatibleWithFactoryExeception( + private boolean isConstructorExceptionCompatibleWithFactoryException( Class constructorException, Class[] factoryExceptions) { for (Class factoryException : factoryExceptions) { if (factoryException.isAssignableFrom(constructorException)) { diff --git a/server/src/main/java/org/elasticsearch/common/inject/spi/TypeEncounter.java b/server/src/main/java/org/elasticsearch/common/inject/spi/TypeEncounter.java index 49e84c81ca615..ed7bd4d7956f3 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/spi/TypeEncounter.java +++ b/server/src/main/java/org/elasticsearch/common/inject/spi/TypeEncounter.java @@ -62,7 +62,7 @@ public interface TypeEncounter { /** * Returns the provider used to obtain instances for the given injection type. The returned - * provider will not be valid until the injetor has been created. The provider will throw an + * provider will not be valid until the injector has been created. The provider will throw an * {@code IllegalStateException} if you try to use it beforehand. 
*/ Provider getProvider(Class type); diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java b/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java index fd9ffdfd31d16..3f2fd817a480d 100644 --- a/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java +++ b/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java @@ -1015,7 +1015,7 @@ private int readArraySize() throws IOException { throw new NegativeArraySizeException("array size must be positive but was: " + arraySize); } // lets do a sanity check that if we are reading an array size that is bigger that the remaining bytes we can safely - // throw an exception instead of allocating the array based on the size. A simple corrutpted byte can make a node go OOM + // throw an exception instead of allocating the array based on the size. A simple corrupted byte can make a node go OOM // if the size is large and for perf reasons we allocate arrays ahead of time ensureCanReadBytes(arraySize); return arraySize; diff --git a/server/src/main/java/org/elasticsearch/common/lucene/Lucene.java b/server/src/main/java/org/elasticsearch/common/lucene/Lucene.java index 4d4a2d838dbd3..305f4e85f364a 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/Lucene.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/Lucene.java @@ -114,7 +114,7 @@ public class Lucene { static { Deprecated annotation = PostingsFormat.forName(LATEST_POSTINGS_FORMAT).getClass().getAnnotation(Deprecated.class); - assert annotation == null : "PostingsFromat " + LATEST_POSTINGS_FORMAT + " is deprecated" ; + assert annotation == null : "PostingsFormat " + LATEST_POSTINGS_FORMAT + " is deprecated" ; annotation = DocValuesFormat.forName(LATEST_DOC_VALUES_FORMAT).getClass().getAnnotation(Deprecated.class); assert annotation == null : "DocValuesFormat " + LATEST_DOC_VALUES_FORMAT + " is deprecated" ; } diff --git 
a/server/src/main/java/org/elasticsearch/common/network/NetworkModule.java b/server/src/main/java/org/elasticsearch/common/network/NetworkModule.java index bbfae10991e84..6480448f0cd10 100644 --- a/server/src/main/java/org/elasticsearch/common/network/NetworkModule.java +++ b/server/src/main/java/org/elasticsearch/common/network/NetworkModule.java @@ -101,7 +101,7 @@ public final class NetworkModule { private final Map> transportFactories = new HashMap<>(); private final Map> transportHttpFactories = new HashMap<>(); - private final List transportIntercetors = new ArrayList<>(); + private final List transportInterceptors = new ArrayList<>(); /** * Creates a network module that custom networking classes can be plugged into. @@ -216,7 +216,7 @@ public Supplier getTransportSupplier() { * Registers a new {@link TransportInterceptor} */ private void registerTransportInterceptor(TransportInterceptor interceptor) { - this.transportIntercetors.add(Objects.requireNonNull(interceptor, "interceptor must not be null")); + this.transportInterceptors.add(Objects.requireNonNull(interceptor, "interceptor must not be null")); } /** @@ -224,7 +224,7 @@ private void registerTransportInterceptor(TransportInterceptor interceptor) { * @see #registerTransportInterceptor(TransportInterceptor) */ public TransportInterceptor getTransportInterceptor() { - return new CompositeTransportInterceptor(this.transportIntercetors); + return new CompositeTransportInterceptor(this.transportInterceptors); } static final class CompositeTransportInterceptor implements TransportInterceptor { diff --git a/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java b/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java index 752a9d5aba1eb..a783d2399798d 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java @@ -245,7 +245,7 @@ 
public synchronized void addAffixUpdateConsumer(Setting.AffixSetting sett public synchronized void addAffixUpdateConsumer(Setting.AffixSetting settingA, Setting.AffixSetting settingB, BiConsumer> consumer, BiConsumer> validator) { - // it would be awesome to have a generic way to do that ie. a set of settings that map to an object with a builder + // it would be awesome to have a generic way to do that i.e. a set of settings that map to an object with a builder // down the road this would be nice to have! ensureSettingIsRegistered(settingA); ensureSettingIsRegistered(settingB); @@ -607,7 +607,7 @@ private boolean assertMatcher(String key, int numComplexMatchers) { } /** - * Returns true if the setting for the given key is dynamically updateable. Otherwise false. + * Returns true if the setting for the given key is dynamically updatable. Otherwise false. */ public boolean isDynamicSetting(String key) { final Setting setting = get(key); @@ -729,9 +729,9 @@ private boolean updateSettings(Settings toApply, Settings.Builder target, Settin changed |= toApply.get(key).equals(target.get(key)) == false; } else { if (isFinalSetting(key)) { - throw new IllegalArgumentException("final " + type + " setting [" + key + "], not updateable"); + throw new IllegalArgumentException("final " + type + " setting [" + key + "], not updatable"); } else { - throw new IllegalArgumentException(type + " setting [" + key + "], not dynamically updateable"); + throw new IllegalArgumentException(type + " setting [" + key + "], not dynamically updatable"); } } } @@ -896,7 +896,7 @@ public String setValue(String value) { } /** - * Returns true iff the setting is a private setting ie. it should be treated as valid even though it has no internal + * Returns true iff the setting is a private setting i.e. it should be treated as valid even though it has no internal * representation. Otherwise false */ // TODO this should be replaced by Setting.Property.HIDDEN or something like this. 
diff --git a/server/src/main/java/org/elasticsearch/common/settings/Setting.java b/server/src/main/java/org/elasticsearch/common/settings/Setting.java index 127f06da1a44d..a770b2906ffb9 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/Setting.java +++ b/server/src/main/java/org/elasticsearch/common/settings/Setting.java @@ -93,13 +93,13 @@ public enum Property { Filtered, /** - * iff this setting can be dynamically updateable + * iff this setting can be dynamically updatable */ Dynamic, /** - * mark this setting as final, not updateable even when the context is not dynamic - * ie. Setting this property on an index scoped setting will fail update when the index is closed + * mark this setting as final, not updatable even when the context is not dynamic + * i.e. Setting this property on an index scoped setting will fail update when the index is closed */ Final, @@ -279,7 +279,7 @@ public final Key getRawKey() { } /** - * Returns true if this setting is dynamically updateable, otherwise false + * Returns true if this setting is dynamically updatable, otherwise false */ public final boolean isDynamic() { return properties.contains(Property.Dynamic); @@ -1205,7 +1205,7 @@ public static Setting> listSetting( final Setting> fallbackSetting, final Function singleValueParser, final Property... 
properties) { - return listSetting(key, fallbackSetting, singleValueParser, (s) -> parseableStringToList(fallbackSetting.getRaw(s)), properties); + return listSetting(key, fallbackSetting, singleValueParser, (s) -> parsableStringToList(fallbackSetting.getRaw(s)), properties); } public static Setting> listSetting( @@ -1226,12 +1226,12 @@ public static Setting> listSetting( throw new IllegalArgumentException("default value function must not return null"); } Function> parser = (s) -> - parseableStringToList(s).stream().map(singleValueParser).collect(Collectors.toList()); + parsableStringToList(s).stream().map(singleValueParser).collect(Collectors.toList()); return new ListSetting<>(key, fallbackSetting, defaultStringValue, parser, properties); } - private static List parseableStringToList(String parsableString) { + private static List parsableStringToList(String parsableString) { // fromXContent doesn't use named xcontent or deprecation. try (XContentParser xContentParser = XContentType.JSON.xContent() .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, parsableString)) { diff --git a/server/src/main/java/org/elasticsearch/common/time/JavaDateFormatter.java b/server/src/main/java/org/elasticsearch/common/time/JavaDateFormatter.java index 68e2cfd4fe317..f90714c6312d9 100644 --- a/server/src/main/java/org/elasticsearch/common/time/JavaDateFormatter.java +++ b/server/src/main/java/org/elasticsearch/common/time/JavaDateFormatter.java @@ -91,7 +91,7 @@ public TemporalAccessor parse(String input) { @Override public DateFormatter withZone(ZoneId zoneId) { - // shortcurt to not create new objects unnecessarily + // shortcut to not create new objects unnecessarily if (zoneId.equals(parsers[0].getZone())) { return this; } @@ -106,7 +106,7 @@ public DateFormatter withZone(ZoneId zoneId) { @Override public DateFormatter withLocale(Locale locale) { - // shortcurt to not create new objects unnecessarily + // shortcut to not create new objects 
unnecessarily if (locale.equals(parsers[0].getLocale())) { return this; } diff --git a/server/src/main/java/org/elasticsearch/common/util/CancellableThreads.java b/server/src/main/java/org/elasticsearch/common/util/CancellableThreads.java index c2f55b8d9b939..3037c7a73c0d5 100644 --- a/server/src/main/java/org/elasticsearch/common/util/CancellableThreads.java +++ b/server/src/main/java/org/elasticsearch/common/util/CancellableThreads.java @@ -29,7 +29,7 @@ /** * A utility class for multi threaded operation that needs to be cancellable via interrupts. Every cancellable operation should be - * executed via {@link #execute(Interruptable)}, which will capture the executing thread and make sure it is interrupted in the case + * executed via {@link #execute(Interruptible)}, which will capture the executing thread and make sure it is interrupted in the case * of cancellation. * * Cancellation policy: This class does not support external interruption via Thread#interrupt(). Always use #cancel() instead. @@ -77,33 +77,33 @@ private synchronized boolean add() { } /** - * run the Interruptable, capturing the executing thread. Concurrent calls to {@link #cancel(String)} will interrupt this thread + * run the Interruptible, capturing the executing thread. Concurrent calls to {@link #cancel(String)} will interrupt this thread * causing the call to prematurely return. * - * @param interruptable code to run + * @param interruptible code to run */ - public void execute(Interruptable interruptable) { + public void execute(Interruptible interruptible) { try { - executeIO(interruptable); + executeIO(interruptible); } catch (IOException e) { - assert false : "the passed interruptable can not result in an IOException"; + assert false : "the passed interruptible can not result in an IOException"; throw new RuntimeException("unexpected IO exception", e); } } /** - * run the Interruptable, capturing the executing thread. 
Concurrent calls to {@link #cancel(String)} will interrupt this thread + * run the Interruptible, capturing the executing thread. Concurrent calls to {@link #cancel(String)} will interrupt this thread * causing the call to prematurely return. * - * @param interruptable code to run + * @param interruptible code to run */ - public void executeIO(IOInterruptable interruptable) throws IOException { + public void executeIO(IOInterruptible interruptible) throws IOException { boolean wasInterrupted = add(); boolean cancelledByExternalInterrupt = false; RuntimeException runtimeException = null; IOException ioException = null; try { - interruptable.run(); + interruptible.run(); } catch (InterruptedException | ThreadInterruptedException e) { // ignore, this interrupt has been triggered by us in #cancel()... assert cancelled : "Interruption via Thread#interrupt() is unsupported. Use CancellableThreads#cancel() instead"; @@ -167,11 +167,11 @@ public synchronized void cancel(String reason) { } - public interface Interruptable extends IOInterruptable { + public interface Interruptible extends IOInterruptible { void run() throws InterruptedException; } - public interface IOInterruptable { + public interface IOInterruptible { void run() throws IOException, InterruptedException; } diff --git a/server/src/main/java/org/elasticsearch/common/util/LocaleUtils.java b/server/src/main/java/org/elasticsearch/common/util/LocaleUtils.java index acc2cbbfa57ee..e846448856300 100644 --- a/server/src/main/java/org/elasticsearch/common/util/LocaleUtils.java +++ b/server/src/main/java/org/elasticsearch/common/util/LocaleUtils.java @@ -32,7 +32,7 @@ public class LocaleUtils { /** * Parse the given locale as {@code language}, {@code language-country} or * {@code language-country-variant}. - * Either underscores or hyphens may be used as separators, but consistently, ie. + * Either underscores or hyphens may be used as separators, but consistently, i.e. 
* you may not use an hyphen to separate the language from the country and an * underscore to separate the country from the variant. * @throws IllegalArgumentException if there are too many parts in the locale string diff --git a/server/src/main/java/org/elasticsearch/common/util/PageCacheRecycler.java b/server/src/main/java/org/elasticsearch/common/util/PageCacheRecycler.java index 3cc2eacd3bec6..36eb42e825e40 100644 --- a/server/src/main/java/org/elasticsearch/common/util/PageCacheRecycler.java +++ b/server/src/main/java/org/elasticsearch/common/util/PageCacheRecycler.java @@ -85,14 +85,14 @@ public PageCacheRecycler(Settings settings) { // We have a global amount of memory that we need to divide across data types. // Since some types are more useful than other ones we give them different weights. - // Trying to store all of them in a single stack would be problematic because eg. + // Trying to store all of them in a single stack would be problematic because e.g. // a work load could fill the recycler with only byte[] pages and then another // workload that would work with double[] pages couldn't recycle them because there // is no space left in the stack/queue. LRU/LFU policies are not an option either // because they would make obtain/release too costly: we really need constant-time // operations. // Ultimately a better solution would be to only store one kind of data and have the - // ability to interpret it either as a source of bytes, doubles, longs, etc. eg. thanks + // ability to interpret it either as a source of bytes, doubles, longs, etc. e.g. thanks // to direct ByteBuffers or sun.misc.Unsafe on a byte[] but this would have other issues // that would need to be addressed such as garbage collection of native memory or safety // of Unsafe writes. 
diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/ConcurrentCollections.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/ConcurrentCollections.java index 5dfd879590954..55061bf352473 100644 --- a/server/src/main/java/org/elasticsearch/common/util/concurrent/ConcurrentCollections.java +++ b/server/src/main/java/org/elasticsearch/common/util/concurrent/ConcurrentCollections.java @@ -49,8 +49,8 @@ public static ConcurrentMap newConcurrentMapWithAggressiveConcurren /** * Creates a new CHM with an aggressive concurrency level, aimed at high concurrent update rate long living maps. */ - public static ConcurrentMap newConcurrentMapWithAggressiveConcurrency(int initalCapacity) { - return new ConcurrentHashMap<>(initalCapacity, 0.75f, aggressiveConcurrencyLevel); + public static ConcurrentMap newConcurrentMapWithAggressiveConcurrency(int initialCapacity) { + return new ConcurrentHashMap<>(initialCapacity, 0.75f, aggressiveConcurrencyLevel); } public static ConcurrentMap newConcurrentMap() { @@ -58,7 +58,7 @@ public static ConcurrentMap newConcurrentMap() { } /** - * Creates a new CHM with an aggressive concurrency level, aimed at highly updateable long living maps. + * Creates a new CHM with an aggressive concurrency level, aimed at highly updatable long living maps. 
*/ public static ConcurrentMapLong newConcurrentMapLongWithAggressiveConcurrency() { return new ConcurrentHashMapLong<>(ConcurrentCollections.newConcurrentMapWithAggressiveConcurrency()); diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/EsThreadPoolExecutor.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/EsThreadPoolExecutor.java index 8bbf0a59ee06d..d0bb4680017ff 100644 --- a/server/src/main/java/org/elasticsearch/common/util/concurrent/EsThreadPoolExecutor.java +++ b/server/src/main/java/org/elasticsearch/common/util/concurrent/EsThreadPoolExecutor.java @@ -123,7 +123,7 @@ private boolean assertDefaultContext(Runnable r) { assert contextHolder.isDefaultContext() : "the thread context is not the default context and the thread [" + Thread.currentThread().getName() + "] is being returned to the pool after executing [" + r + "]"; } catch (IllegalStateException ex) { - // sometimes we execute on a closed context and isDefaultContext doen't bypass the ensureOpen checks + // sometimes we execute on a closed context and isDefaultContext doesn't bypass the ensureOpen checks // this must not trigger an exception here since we only assert if the default is restored and // we don't really care if we are closed if (contextHolder.isClosed() == false) { diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/QueueResizingEsThreadPoolExecutor.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/QueueResizingEsThreadPoolExecutor.java index 75c085d3bdd4a..46879130561c1 100644 --- a/server/src/main/java/org/elasticsearch/common/util/concurrent/QueueResizingEsThreadPoolExecutor.java +++ b/server/src/main/java/org/elasticsearch/common/util/concurrent/QueueResizingEsThreadPoolExecutor.java @@ -42,7 +42,7 @@ public final class QueueResizingEsThreadPoolExecutor extends EsThreadPoolExecuto public static double EWMA_ALPHA = 0.3; private static final Logger logger = 
LogManager.getLogger(QueueResizingEsThreadPoolExecutor.class); - // The amount the queue size is adjusted by for each calcuation + // The amount the queue size is adjusted by for each calculation private static final int QUEUE_ADJUSTMENT_AMOUNT = 50; private final Function runnableWrapper; @@ -79,7 +79,7 @@ public final class QueueResizingEsThreadPoolExecutor extends EsThreadPoolExecuto @Override protected void doExecute(final Runnable command) { - // we are submitting a task, it has not yet started running (because super.excute() has not + // we are submitting a task, it has not yet started running (because super.execute() has not // been called), but it could be immediately run, or run at a later time. We need the time // this task entered the queue, which we get by creating a TimedRunnable, which starts the // clock as soon as it is created. diff --git a/server/src/main/java/org/elasticsearch/index/IndexService.java b/server/src/main/java/org/elasticsearch/index/IndexService.java index 54bf5fa1aa18e..6035d000e33e9 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexService.java +++ b/server/src/main/java/org/elasticsearch/index/IndexService.java @@ -651,7 +651,7 @@ public synchronized void updateMetaData(final IndexMetaData currentIndexMetaData // once we change the refresh interval we schedule yet another refresh // to ensure we are in a clean and predictable state. // it doesn't matter if we move from or to -1 in both cases we want - // docs to become visible immediately. This also flushes all pending indexing / search reqeusts + // docs to become visible immediately. This also flushes all pending indexing / search requests // that are waiting for a refresh. 
threadPool.executor(ThreadPool.Names.REFRESH).execute(new AbstractRunnable() { @Override diff --git a/server/src/main/java/org/elasticsearch/index/IndexSettings.java b/server/src/main/java/org/elasticsearch/index/IndexSettings.java index ead9e7597fd73..a461748d59bfa 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexSettings.java +++ b/server/src/main/java/org/elasticsearch/index/IndexSettings.java @@ -234,7 +234,7 @@ public final class IndexSettings { /** * Index setting to enable / disable deletes garbage collection. - * This setting is realtime updateable + * This setting is realtime updatable */ public static final TimeValue DEFAULT_GC_DELETES = TimeValue.timeValueSeconds(60); public static final Setting INDEX_GC_DELETES_SETTING = @@ -916,7 +916,7 @@ public long getSoftDeleteRetentionOperations() { } /** - * Returns true if the this index should be searched throttled ie. using the + * Returns true if the this index should be searched throttled i.e. using the * {@link org.elasticsearch.threadpool.ThreadPool.Names#SEARCH_THROTTLED} thread-pool */ public boolean isSearchThrottled() { diff --git a/server/src/main/java/org/elasticsearch/index/MergePolicyConfig.java b/server/src/main/java/org/elasticsearch/index/MergePolicyConfig.java index 54f2b10ddeafb..4ed1c62460e19 100644 --- a/server/src/main/java/org/elasticsearch/index/MergePolicyConfig.java +++ b/server/src/main/java/org/elasticsearch/index/MergePolicyConfig.java @@ -210,8 +210,8 @@ void setMaxMergesAtOnce(Integer maxMergeAtOnce) { mergePolicy.setMaxMergeAtOnce(maxMergeAtOnce); } - void setFloorSegmentSetting(ByteSizeValue floorSegementSetting) { - mergePolicy.setFloorSegmentMB(floorSegementSetting.getMbFrac()); + void setFloorSegmentSetting(ByteSizeValue floorSegmentSetting) { + mergePolicy.setFloorSegmentMB(floorSegmentSetting.getMbFrac()); } void setExpungeDeletesAllowed(Double value) { diff --git a/server/src/main/java/org/elasticsearch/index/engine/Engine.java 
b/server/src/main/java/org/elasticsearch/index/engine/Engine.java index 008b85331030d..450082140da41 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -655,7 +655,7 @@ public final Searcher acquireSearcher(String source) throws EngineException { * safe manner, preferably in a try/finally block. * * @param source the source API or routing that triggers this searcher acquire - * @param scope the scope of this searcher ie. if the searcher will be used for get or search purposes + * @param scope the scope of this searcher i.e. if the searcher will be used for get or search purposes * * @see Searcher#close() */ diff --git a/server/src/main/java/org/elasticsearch/index/engine/EngineConfig.java b/server/src/main/java/org/elasticsearch/index/engine/EngineConfig.java index f95ba96d343c9..2cd919b3b6060 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/EngineConfig.java +++ b/server/src/main/java/org/elasticsearch/index/engine/EngineConfig.java @@ -82,7 +82,7 @@ public final class EngineConfig { /** * Index setting to change the low level lucene codec used for writing new segments. - * This setting is not realtime updateable. + * This setting is not realtime updatable. * This setting is also settable on the node and the index level, it's commonly used in hot/cold node archs where index is likely * allocated on both `kind` of nodes. */ @@ -143,7 +143,7 @@ public EngineConfig(ShardId shardId, String allocationId, ThreadPool threadPool, // local node so that decisions to flush segments to disk are made by // IndexingMemoryController rather than Lucene. // Add an escape hatch in case this change proves problematic - it used - // to be a fixed amound of RAM: 256 MB. + // to be a fixed amount of RAM: 256 MB. 
// TODO: Remove this escape hatch in 8.x final String escapeHatchProperty = "es.index.memory.max_index_buffer_size"; String maxBufferSize = System.getProperty(escapeHatchProperty); @@ -183,7 +183,7 @@ public ByteSizeValue getIndexingBufferSize() { } /** - * Returns true iff delete garbage collection in the engine should be enabled. This setting is updateable + * Returns true iff delete garbage collection in the engine should be enabled. This setting is updatable * in realtime and forces a volatile read. Consumers can safely read this value directly go fetch it's latest value. * The default is true *

    diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index a295fbf3336b1..3b6665c7507fb 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -1962,7 +1962,7 @@ private boolean failOnTragicEvent(AlreadyClosedException ex) { failEngine("already closed by tragic event on the translog", translog.getTragicException()); engineFailed = true; } else if (failedEngine.get() == null && isClosed.get() == false) { // we are closed but the engine is not failed yet? - // this smells like a bug - we only expect ACE if we are in a fatal case ie. either translog or IW is closed by + // this smells like a bug - we only expect ACE if we are in a fatal case i.e. either translog or IW is closed by // a tragic event or has closed itself. if that is not the case we are in a buggy state and raise an assertion error throw new AssertionError("Unexpected AlreadyClosedException", ex); } else { @@ -2034,7 +2034,7 @@ public List segments(boolean verbose) { /** * Closes the engine without acquiring the write lock. This should only be - * called while the write lock is hold or in a disaster condition ie. if the engine + * called while the write lock is hold or in a disaster condition i.e. if the engine * is failed. 
*/ @Override @@ -2301,7 +2301,7 @@ protected void doRun() throws Exception { * @param writer the index writer to commit * @param translog the translog * @param syncId the sync flush ID ({@code null} if not committing a synced flush) - * @throws IOException if an I/O exception occurs committing the specfied writer + * @throws IOException if an I/O exception occurs committing the specified writer */ protected void commitIndexWriter(final IndexWriter writer, final Translog translog, @Nullable final String syncId) throws IOException { ensureCanFlush(); diff --git a/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java b/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java index e4dce8919cf1e..f8d82ceb5eec1 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java +++ b/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java @@ -56,7 +56,7 @@ private static final class VersionLookup { // respect that and fill the version map. The nice part here is that we are only really requiring this for a single ID and since // we hold the ID lock in the engine while we do all this it's safe to do it globally unlocked. // NOTE: these values can both be non-volatile since it's ok to read a stale value per doc ID. We serialize changes in the engine - // that will prevent concurrent updates to the same document ID and therefore we can rely on the happens-before guanratee of the + // that will prevent concurrent updates to the same document ID and therefore we can rely on the happens-before guarantee of the // map reference itself. private boolean unsafe; @@ -378,7 +378,7 @@ private boolean canRemoveTombstone(long maxTimestampToPrune, long maxSeqNoToPrun final boolean isTooOld = versionValue.time < maxTimestampToPrune; final boolean isSafeToPrune = versionValue.seqNo <= maxSeqNoToPrune; // version value can't be removed it's - // not yet flushed to lucene ie. 
it's part of this current maps object + // not yet flushed to lucene i.e. it's part of this current maps object final boolean isNotTrackedByCurrentMaps = versionValue.time < maps.getMinDeleteTimestamp(); return isTooOld && isSafeToPrune && isNotTrackedByCurrentMaps; } @@ -464,7 +464,7 @@ Map getAllTombstones() { } /** - * Acquires a releaseable lock for the given uId. All *UnderLock methods require + * Acquires a releasable lock for the given uId. All *UnderLock methods require * this lock to be hold by the caller otherwise the visibility guarantees of this version * map are broken. We assert on this lock to be hold when calling these methods. * @see KeyedLock diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/SortedNumericDoubleValues.java b/server/src/main/java/org/elasticsearch/index/fielddata/SortedNumericDoubleValues.java index d0d9fc4b4c79e..6262146a8acf4 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/SortedNumericDoubleValues.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/SortedNumericDoubleValues.java @@ -35,7 +35,7 @@ protected SortedNumericDoubleValues() {} /** Advance the iterator to exactly {@code target} and return whether * {@code target} has a value. * {@code target} must be greater than or equal to the current - * doc ID and must be a valid doc ID, ie. ≥ 0 and + * doc ID and must be a valid doc ID, i.e. 
≥ 0 and * < {@code maxDoc}.*/ public abstract boolean advanceExact(int target) throws IOException; diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/ordinals/MultiOrdinals.java b/server/src/main/java/org/elasticsearch/index/fielddata/ordinals/MultiOrdinals.java index 47307f27c4014..7fe84bfae6df9 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/ordinals/MultiOrdinals.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/ordinals/MultiOrdinals.java @@ -50,7 +50,7 @@ public static boolean significantlySmallerThanSinglePackedOrdinals(int maxDoc, i float acceptableOverheadRatio) { int bitsPerOrd = PackedInts.bitsRequired(numOrds); bitsPerOrd = PackedInts.fastestFormatAndBits(numDocsWithValue, bitsPerOrd, acceptableOverheadRatio).bitsPerValue; - // Compute the worst-case number of bits per value for offsets in the worst case, eg. if no docs have a value at the + // Compute the worst-case number of bits per value for offsets in the worst case, e.g. 
if no docs have a value at the // beginning of the block and all docs have one at the end of the block final float avgValuesPerDoc = (float) numDocsWithValue / maxDoc; final int maxDelta = (int) Math.ceil(OFFSETS_PAGE_SIZE * (1 - avgValuesPerDoc) * avgValuesPerDoc); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java index 54e59691f80d5..49103505b3085 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java @@ -683,37 +683,37 @@ private static Mapper.Builder createBuilderFromDynamicValue(final ParseCont if (token == XContentParser.Token.VALUE_STRING) { String text = context.parser().text(); - boolean parseableAsLong = false; + boolean parsableAsLong = false; try { Long.parseLong(text); - parseableAsLong = true; + parsableAsLong = true; } catch (NumberFormatException e) { // not a long number } - boolean parseableAsDouble = false; + boolean parsableAsDouble = false; try { Double.parseDouble(text); - parseableAsDouble = true; + parsableAsDouble = true; } catch (NumberFormatException e) { // not a double number } - if (parseableAsLong && context.root().numericDetection()) { + if (parsableAsLong && context.root().numericDetection()) { Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, XContentFieldType.LONG); if (builder == null) { builder = newLongBuilder(currentFieldName, context.indexSettings().getIndexVersionCreated()); } return builder; - } else if (parseableAsDouble && context.root().numericDetection()) { + } else if (parsableAsDouble && context.root().numericDetection()) { Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, XContentFieldType.DOUBLE); if (builder == null) { builder = newFloatBuilder(currentFieldName, context.indexSettings().getIndexVersionCreated()); } return builder; 
- } else if (parseableAsLong == false && parseableAsDouble == false && context.root().dateDetection()) { + } else if (parsableAsLong == false && parsableAsDouble == false && context.root().dateDetection()) { // We refuse to match pure numbers, which are too likely to be - // false positives with date formats that include eg. + // false positives with date formats that include e.g. // `epoch_millis` or `YYYY` for (DateFormatter dateTimeFormatter : context.root().dynamicDateTimeFormatters()) { try { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DynamicTemplate.java b/server/src/main/java/org/elasticsearch/index/mapper/DynamicTemplate.java index aafe9f6ba03de..924e6e2357b5e 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DynamicTemplate.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DynamicTemplate.java @@ -299,7 +299,7 @@ public String mappingType(String dynamicType) { } if (type.equals(mapping.get("type")) == false // either the type was not set, or we updated it through replacements && "text".equals(type)) { // and the result is "text" - // now that string has been splitted into text and keyword, we use text for + // now that string has been split into text and keyword, we use text for // dynamic mappings. 
However before it used to be possible to index as a keyword // by setting index=not_analyzed, so for now we will use a keyword field rather // than a text field if index=not_analyzed and the field type was not specified diff --git a/server/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java index 72dbe28d12d09..f080105fb1656 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java @@ -82,7 +82,7 @@ public T index(boolean index) { if (index) { if (fieldType.indexOptions() == IndexOptions.NONE) { /* - * the logic here is to reset to the default options only if we are not indexed ie. options are null + * the logic here is to reset to the default options only if we are not indexed i.e. options are null * if the fieldType has a non-null option we are all good it might have been set through a different * call. */ diff --git a/server/src/main/java/org/elasticsearch/index/mapper/FieldNamesFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/FieldNamesFieldMapper.java index 79b2b0c4c67d1..28421fb9da672 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/FieldNamesFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/FieldNamesFieldMapper.java @@ -268,7 +268,7 @@ protected void parseCreateField(ParseContext context, List field for (IndexableField field : document.getFields()) { final String path = field.name(); if (path.equals(previousPath)) { - // Sometimes mappers create multiple Lucene fields, eg. one for indexing, + // Sometimes mappers create multiple Lucene fields, e.g. one for indexing, // one for doc values and one for storing. Deduplicating is not required // for correctness but this simple check helps save utf-8 conversions and // gives Lucene fewer values to deal with. 
diff --git a/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java index d2b432e7c7ca1..b07ee33e34f4e 100644 --- a/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java @@ -383,7 +383,7 @@ public TopDocsAndMaxScore[] topDocs(SearchHit[] hits) throws IOException { int parentDocId = hit.docId(); final int readerIndex = ReaderUtil.subIndex(parentDocId, searcher().getIndexReader().leaves()); - // With nested inner hits the nested docs are always in the same segement, so need to use the other segments + // With nested inner hits the nested docs are always in the same segment, so need to use the other segments LeafReaderContext ctx = searcher().getIndexReader().leaves().get(readerIndex); Query childFilter = childObjectMapper.nestedTypeFilter(); diff --git a/server/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java b/server/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java index b275088d89441..c7303d6e6c5ff 100644 --- a/server/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java +++ b/server/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java @@ -77,7 +77,7 @@ public QueryShardContext convertToShardContext() { /** * Registers an async action that must be executed before the next rewrite round in order to make progress. - * This should be used if a rewriteabel needs to fetch some external resources in order to be executed ie. a document + * This should be used if a rewritable needs to fetch some external resources in order to be executed i.e. a document * from an index. 
*/ public void registerAsyncAction(BiConsumer> asyncAction) { diff --git a/server/src/main/java/org/elasticsearch/index/query/QueryShardContext.java b/server/src/main/java/org/elasticsearch/index/query/QueryShardContext.java index f7f1d29f53098..865f52c8671b7 100644 --- a/server/src/main/java/org/elasticsearch/index/query/QueryShardContext.java +++ b/server/src/main/java/org/elasticsearch/index/query/QueryShardContext.java @@ -88,7 +88,7 @@ public class QueryShardContext extends QueryRewriteContext { private final IndexReader reader; private final String clusterAlias; private String[] types = Strings.EMPTY_ARRAY; - private boolean cachable = true; + private boolean cacheable = true; private final SetOnce frozen = new SetOnce<>(); private final Index fullyQualifiedIndex; @@ -333,7 +333,7 @@ public final void freezeContext() { * class says a request can be cached. */ protected final void failIfFrozen() { - this.cachable = false; + this.cacheable = false; if (frozen.get() == Boolean.TRUE) { throw new IllegalArgumentException("features that prevent cachability are disabled on this context"); } else { @@ -354,10 +354,10 @@ public void executeAsyncActions(ActionListener listener) { } /** - * Returns true iff the result of the processed search request is cachable. Otherwise false + * Returns true iff the result of the processed search request is cacheable. 
Otherwise false */ - public final boolean isCachable() { - return cachable; + public final boolean isCacheable() { + return cacheable; } /** diff --git a/server/src/main/java/org/elasticsearch/index/query/Rewriteable.java b/server/src/main/java/org/elasticsearch/index/query/Rewriteable.java index ba8d6b84d5374..4599764233ba8 100644 --- a/server/src/main/java/org/elasticsearch/index/query/Rewriteable.java +++ b/server/src/main/java/org/elasticsearch/index/query/Rewriteable.java @@ -59,7 +59,7 @@ static > T rewrite(T original, QueryRewriteContext cont * @param original the original rewriteable to rewrite * @param context the rewrite context to use * @param assertNoAsyncTasks if true the rewrite will fail if there are any pending async tasks on the context after the - * rewrite. See {@link QueryRewriteContext#executeAsyncActions(ActionListener)} for detals + * rewrite. See {@link QueryRewriteContext#executeAsyncActions(ActionListener)} for details * @throws IOException if an {@link IOException} occurs */ static > T rewrite(T original, QueryRewriteContext context, boolean assertNoAsyncTasks) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/index/query/ScriptQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/ScriptQueryBuilder.java index 8a3666afb9d12..9a9c4e4cba92e 100644 --- a/server/src/main/java/org/elasticsearch/index/query/ScriptQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/ScriptQueryBuilder.java @@ -197,7 +197,7 @@ public float matchCost() { @Override public boolean isCacheable(LeafReaderContext ctx) { // TODO: Change this to true when we can assume that scripts are pure functions - // ie. the return value is always the same given the same conditions and may not + // i.e. the return value is always the same given the same conditions and may not // depend on the current timestamp, other documents, etc. 
return false; } diff --git a/server/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java b/server/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java index 466341e3cc8b4..fecb00a2c803f 100644 --- a/server/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java @@ -480,7 +480,7 @@ public static SimpleQueryStringBuilder fromXContent(XContentParser parser) throw boolean analyzeWildcard = SimpleQueryStringBuilder.DEFAULT_ANALYZE_WILDCARD; String quoteFieldSuffix = null; boolean autoGenerateSynonymsPhraseQuery = true; - int fuzzyPrefixLenght = SimpleQueryStringBuilder.DEFAULT_FUZZY_PREFIX_LENGTH; + int fuzzyPrefixLength = SimpleQueryStringBuilder.DEFAULT_FUZZY_PREFIX_LENGTH; int fuzzyMaxExpansions = SimpleQueryStringBuilder.DEFAULT_FUZZY_MAX_EXPANSIONS; boolean fuzzyTranspositions = SimpleQueryStringBuilder.DEFAULT_FUZZY_TRANSPOSITIONS; @@ -538,7 +538,7 @@ public static SimpleQueryStringBuilder fromXContent(XContentParser parser) throw } else if (GENERATE_SYNONYMS_PHRASE_QUERY.match(currentFieldName, parser.getDeprecationHandler())) { autoGenerateSynonymsPhraseQuery = parser.booleanValue(); } else if (FUZZY_PREFIX_LENGTH_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { - fuzzyPrefixLenght = parser.intValue(); + fuzzyPrefixLength = parser.intValue(); } else if (FUZZY_MAX_EXPANSIONS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { fuzzyMaxExpansions = parser.intValue(); } else if (FUZZY_TRANSPOSITIONS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { @@ -569,7 +569,7 @@ public static SimpleQueryStringBuilder fromXContent(XContentParser parser) throw } qb.analyzeWildcard(analyzeWildcard).boost(boost).quoteFieldSuffix(quoteFieldSuffix); qb.autoGenerateSynonymsPhraseQuery(autoGenerateSynonymsPhraseQuery); - qb.fuzzyPrefixLength(fuzzyPrefixLenght); + 
qb.fuzzyPrefixLength(fuzzyPrefixLength); qb.fuzzyMaxExpansions(fuzzyMaxExpansions); qb.fuzzyTranspositions(fuzzyTranspositions); return qb; diff --git a/server/src/main/java/org/elasticsearch/index/query/TermsSetQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/TermsSetQueryBuilder.java index 1e151896df046..ca7a25a4f942b 100644 --- a/server/src/main/java/org/elasticsearch/index/query/TermsSetQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/TermsSetQueryBuilder.java @@ -330,7 +330,7 @@ public String toString() { @Override public boolean isCacheable(LeafReaderContext ctx) { // TODO: Change this to true when we can assume that scripts are pure functions - // ie. the return value is always the same given the same conditions and may not + // i.e. the return value is always the same given the same conditions and may not // depend on the current timestamp, other documents, etc. return false; } diff --git a/server/src/main/java/org/elasticsearch/index/query/functionscore/DecayFunctionBuilder.java b/server/src/main/java/org/elasticsearch/index/query/functionscore/DecayFunctionBuilder.java index 7d6dd4a59cb19..9ad1d79e78155 100644 --- a/server/src/main/java/org/elasticsearch/index/query/functionscore/DecayFunctionBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/functionscore/DecayFunctionBuilder.java @@ -507,17 +507,17 @@ public abstract static class AbstractDistanceScoreFunction extends ScoreFunction private final DecayFunction func; protected final MultiValueMode mode; - public AbstractDistanceScoreFunction(double userSuppiedScale, double decay, double offset, DecayFunction func, + public AbstractDistanceScoreFunction(double userSuppliedScale, double decay, double offset, DecayFunction func, MultiValueMode mode) { super(CombineFunction.MULTIPLY); this.mode = mode; - if (userSuppiedScale <= 0.0) { + if (userSuppliedScale <= 0.0) { throw new IllegalArgumentException(FunctionScoreQueryBuilder.NAME + " : scale 
must be > 0.0."); } if (decay <= 0.0 || decay >= 1.0) { throw new IllegalArgumentException(FunctionScoreQueryBuilder.NAME + " : decay must be in the range [0..1]."); } - this.scale = func.processScale(userSuppiedScale, decay); + this.scale = func.processScale(userSuppliedScale, decay); this.func = func; if (offset < 0.0d) { throw new IllegalArgumentException(FunctionScoreQueryBuilder.NAME + " : offset must be > 0.0"); diff --git a/server/src/main/java/org/elasticsearch/index/search/ESToParentBlockJoinQuery.java b/server/src/main/java/org/elasticsearch/index/search/ESToParentBlockJoinQuery.java index 5e6aa3bb7c456..22a4dfef58337 100644 --- a/server/src/main/java/org/elasticsearch/index/search/ESToParentBlockJoinQuery.java +++ b/server/src/main/java/org/elasticsearch/index/search/ESToParentBlockJoinQuery.java @@ -61,7 +61,7 @@ public Query rewrite(IndexReader reader) throws IOException { if (innerRewrite != query) { // Right now ToParentBlockJoinQuery always rewrites to a ToParentBlockJoinQuery // so the else block will never be used. It is useful in the case that - // ToParentBlockJoinQuery one day starts to rewrite to a different query, eg. + // ToParentBlockJoinQuery one day starts to rewrite to a different query, e.g. // a MatchNoDocsQuery if it realizes that it cannot match any docs and rewrites // to a MatchNoDocsQuery. In that case it would be fine to lose information // about the nested path. diff --git a/server/src/main/java/org/elasticsearch/index/search/NestedHelper.java b/server/src/main/java/org/elasticsearch/index/search/NestedHelper.java index 1c17fa0cb935f..a87b77de5a21d 100644 --- a/server/src/main/java/org/elasticsearch/index/search/NestedHelper.java +++ b/server/src/main/java/org/elasticsearch/index/search/NestedHelper.java @@ -96,7 +96,7 @@ public boolean mightMatchNestedDocs(Query query) { /** Returns true if a query on the given field might match nested documents. 
*/ boolean mightMatchNestedDocs(String field) { if (field.startsWith("_")) { - // meta field. Every meta field behaves differently, eg. nested + // meta field. Every meta field behaves differently, e.g. nested // documents have the same _uid as their parent, put their path in // the _type field but do not have _field_names. So we just ignore // meta fields and return true, which is always safe, it just means @@ -165,7 +165,7 @@ public boolean mightMatchNonNestedDocs(Query query, String nestedPath) { * or documents that are nested under a different path. */ boolean mightMatchNonNestedDocs(String field, String nestedPath) { if (field.startsWith("_")) { - // meta field. Every meta field behaves differently, eg. nested + // meta field. Every meta field behaves differently, e.g. nested // documents have the same _uid as their parent, put their path in // the _type field but do not have _field_names. So we just ignore // meta fields and return true, which is always safe, it just means diff --git a/server/src/main/java/org/elasticsearch/index/search/QueryStringQueryParser.java b/server/src/main/java/org/elasticsearch/index/search/QueryStringQueryParser.java index 84597d4d3383c..44b753621bab7 100644 --- a/server/src/main/java/org/elasticsearch/index/search/QueryStringQueryParser.java +++ b/server/src/main/java/org/elasticsearch/index/search/QueryStringQueryParser.java @@ -132,7 +132,7 @@ public QueryStringQueryParser(QueryShardContext context, Map fiel } /** - * Defaults to all queryiable fields extracted from the mapping for query terms + * Defaults to all queryable fields extracted from the mapping for query terms * @param context The query shard context * @param lenient If set to `true` will cause format based failures (like providing text to a numeric field) to be ignored. 
*/ diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 8b0db9543e33b..68da820ca9a71 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -1298,7 +1298,7 @@ private Engine.Result applyTranslogOperation(Engine engine, Translog.Operation o switch (operation.opType()) { case INDEX: final Translog.Index index = (Translog.Index) operation; - // we set canHaveDuplicates to true all the time such that we de-optimze the translog case and ensure that all + // we set canHaveDuplicates to true all the time such that we de-optimize the translog case and ensure that all // autoGeneratedID docs that are coming from the primary are updated correctly. result = applyIndexOperation(engine, index.seqNo(), index.primaryTerm(), index.version(), versionType, UNASSIGNED_SEQ_NO, 0, index.getAutoGeneratedIdTimestamp(), true, origin, diff --git a/server/src/main/java/org/elasticsearch/index/shard/ShardSplittingQuery.java b/server/src/main/java/org/elasticsearch/index/shard/ShardSplittingQuery.java index fb33cceaa49d8..5d1a30adae56f 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/ShardSplittingQuery.java +++ b/server/src/main/java/org/elasticsearch/index/shard/ShardSplittingQuery.java @@ -326,7 +326,7 @@ public boolean matches() throws IOException { if (doc > nextParent) { // we only check once per nested/parent set nextParent = parentDocs.nextSetBit(doc); - // never check a child document against the visitor, they neihter have _id nor _routing as stored fields + // never check a child document against the visitor, they neither have _id nor _routing as stored fields nextParentMatches = visitor.matches(nextParent); } return nextParentMatches; diff --git a/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java 
b/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java index feb48ef85d1ba..101978abd093d 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java +++ b/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java @@ -78,7 +78,7 @@ final class StoreRecovery { /** * Recovers a shard from it's local file system store. This method required pre-knowledge about if the shard should - * exist on disk ie. has been previously allocated or if the shard is a brand new allocation without pre-existing index + * exist on disk i.e. has been previously allocated or if the shard is a brand new allocation without pre-existing index * files / transaction logs. This * @param indexShard the index shard instance to recovery the shard into * @return true if the shard has been recovered successfully, false if the recovery diff --git a/server/src/main/java/org/elasticsearch/index/similarity/SimilarityProviders.java b/server/src/main/java/org/elasticsearch/index/similarity/SimilarityProviders.java index 04970a38bd99d..59bc8fdf2964f 100644 --- a/server/src/main/java/org/elasticsearch/index/similarity/SimilarityProviders.java +++ b/server/src/main/java/org/elasticsearch/index/similarity/SimilarityProviders.java @@ -120,10 +120,10 @@ private SimilarityProviders() {} // no instantiation distributions.put("spl", new DistributionSPL()); DISTRIBUTIONS = unmodifiableMap(distributions); - Map lamdas = new HashMap<>(); - lamdas.put("df", new LambdaDF()); - lamdas.put("ttf", new LambdaTTF()); - LAMBDAS = unmodifiableMap(lamdas); + Map lambdas = new HashMap<>(); + lambdas.put("df", new LambdaDF()); + lambdas.put("ttf", new LambdaTTF()); + LAMBDAS = unmodifiableMap(lambdas); } /** diff --git a/server/src/main/java/org/elasticsearch/index/store/Store.java b/server/src/main/java/org/elasticsearch/index/store/Store.java index 66e3e4d5558d8..4772f8840574a 100644 --- a/server/src/main/java/org/elasticsearch/index/store/Store.java +++ 
b/server/src/main/java/org/elasticsearch/index/store/Store.java @@ -111,10 +111,10 @@ * of file abstraction in Lucene used to read and write Lucene indices. * This class also provides access to metadata information like checksums for committed files. A committed * file is a file that belongs to a segment written by a Lucene commit. Files that have not been committed - * ie. created during a merge or a shard refresh / NRT reopen are not considered in the MetadataSnapshot. + * i.e. created during a merge or a shard refresh / NRT reopen are not considered in the MetadataSnapshot. *

    * Note: If you use a store it's reference count should be increased before using it by calling #incRef and a - * corresponding #decRef must be called in a try/finally block to release the store again ie.: + * corresponding #decRef must be called in a try/finally block to release the store again i.e.: *

      *      store.incRef();
      *      try {
    @@ -947,7 +947,7 @@ public Map asMap() {
              * Returns a diff between the two snapshots that can be used for recovery. The given snapshot is treated as the
              * recovery target and this snapshot as the source. The returned diff will hold a list of files that are:
              * 
      - *
    • identical: they exist in both snapshots and they can be considered the same ie. they don't need to be recovered
    • + *
    • identical: they exist in both snapshots and they can be considered the same i.e. they don't need to be recovered
    • *
    • different: they exist in both snapshots but their they are not identical
    • *
    • missing: files that exist in the source but not in the target
    • *
    @@ -1093,7 +1093,7 @@ public String getSyncId() { */ public static final class RecoveryDiff { /** - * Files that exist in both snapshots and they can be considered the same ie. they don't need to be recovered + * Files that exist in both snapshots and they can be considered the same i.e. they don't need to be recovered */ public final List identical; /** @@ -1208,7 +1208,7 @@ private void readAndCompareChecksum() throws IOException { @Override public void writeBytes(byte[] b, int offset, int length) throws IOException { if (writtenBytes + length > checksumPosition) { - for (int i = 0; i < length; i++) { // don't optimze writing the last block of bytes + for (int i = 0; i < length; i++) { // don't optimize writing the last block of bytes writeByte(b[offset+i]); } } else { diff --git a/server/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java b/server/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java index a05870b842f2d..d3025ef2856ba 100644 --- a/server/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java +++ b/server/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java @@ -267,7 +267,7 @@ private static Fields generateTermVectors(IndexShard indexShard, Map> values = new HashMap<>(); for (DocumentField getField : getFields) { String field = getField.getName(); - if (fields.contains(field)) { // some fields are returned even when not asked for, eg. _timestamp + if (fields.contains(field)) { // some fields are returned even when not asked for, e.g. 
_timestamp values.put(field, getField.getValues()); } } diff --git a/server/src/main/java/org/elasticsearch/index/translog/BaseTranslogReader.java b/server/src/main/java/org/elasticsearch/index/translog/BaseTranslogReader.java index bfd9e31abcd47..8f5884d02758e 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/BaseTranslogReader.java +++ b/server/src/main/java/org/elasticsearch/index/translog/BaseTranslogReader.java @@ -148,7 +148,7 @@ public long getLastModifiedTime() throws IOException { } /** - * Reads a single opertation from the given location. + * Reads a single operation from the given location. */ Translog.Operation read(Translog.Location location) throws IOException { assert location.generation == this.generation : "generation mismatch expected: " + generation + " got: " + location.generation; diff --git a/server/src/main/java/org/elasticsearch/index/translog/Translog.java b/server/src/main/java/org/elasticsearch/index/translog/Translog.java index d8acba635f822..8732841befc78 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/Translog.java +++ b/server/src/main/java/org/elasticsearch/index/translog/Translog.java @@ -227,7 +227,7 @@ private ArrayList recoverFromFiles(Checkpoint checkpoint) throws } final String checkpointTranslogFile = getFilename(checkpoint.generation); - // we open files in reverse order in order to validate tranlsog uuid before we start traversing the translog based on + // we open files in reverse order in order to validate translog uuid before we start traversing the translog based on // the generation id we found in the lucene commit. This gives for better error messages if the wrong // translog was found. 
foundTranslogs.add(openReader(location.resolve(checkpointTranslogFile), checkpoint)); diff --git a/server/src/main/java/org/elasticsearch/index/translog/TranslogReader.java b/server/src/main/java/org/elasticsearch/index/translog/TranslogReader.java index a75f49972a5d2..07b7ca144eef4 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/TranslogReader.java +++ b/server/src/main/java/org/elasticsearch/index/translog/TranslogReader.java @@ -76,7 +76,7 @@ public static TranslogReader open( } /** - * Closes current reader and creates new one with new checkoint and same file channel + * Closes current reader and creates new one with new checkpoint and same file channel */ TranslogReader closeIntoTrimmedReader(long aboveSeqNo, ChannelFactory channelFactory) throws IOException { if (closed.compareAndSet(false, true)) { diff --git a/server/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java b/server/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java index 6b00b0c5db3ff..5666afdb912a9 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java +++ b/server/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java @@ -379,7 +379,7 @@ protected void readBytes(ByteBuffer targetBuffer, long position) throws IOExcept if (position + targetBuffer.remaining() > getWrittenOffset()) { synchronized (this) { // we only flush here if it's really really needed - try to minimize the impact of the read operation - // in some cases ie. a tragic event we might still be able to read the relevant value + // in some cases i.e. a tragic event we might still be able to read the relevant value // which is not really important in production but some test can make most strict assumptions // if we don't fail in this call unless absolutely necessary. 
if (position + targetBuffer.remaining() > getWrittenOffset()) { diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesRequestCache.java b/server/src/main/java/org/elasticsearch/indices/IndicesRequestCache.java index 49c2d070c0310..b861c07b99722 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesRequestCache.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesRequestCache.java @@ -189,8 +189,8 @@ interface CacheEntity extends Accountable { void onCached(Key key, BytesReference value); /** - * Returns true iff the resource behind this entity is still open ie. - * entities associated with it can remain in the cache. ie. IndexShard is still open. + * Returns true iff the resource behind this entity is still open i.e. + * entities associated with it can remain in the cache. i.e. IndexShard is still open. */ boolean isOpen(); diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesService.java b/server/src/main/java/org/elasticsearch/indices/IndicesService.java index 19388a2b63d4d..aa646b09f4375 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -1014,7 +1014,7 @@ public int compareTo(PendingDelete o) { /** * Processes all pending deletes for the given index. This method will acquire all locks for the given index and will * process all pending deletes for this index. Pending deletes might occur if the OS doesn't allow deletion of files because - * they are used by a different process ie. on Windows where files might still be open by a virus scanner. On a shared + * they are used by a different process i.e. on Windows where files might still be open by a virus scanner. On a shared * filesystem a replica might not have been closed when the primary is deleted causing problems on delete calls so we * schedule there deletes later. 
* @param index the index to process the pending deletes for @@ -1212,7 +1212,7 @@ public boolean canCache(ShardSearchRequest request, SearchContext context) { // if now in millis is used (or in the future, a more generic "isDeterministic" flag // then we can't cache based on "now" key within the search request, as it is not deterministic - if (context.getQueryShardContext().isCachable() == false) { + if (context.getQueryShardContext().isCacheable() == false) { return false; } return true; @@ -1283,7 +1283,7 @@ private BytesReference cacheShardLevelResult(IndexShard shard, DirectoryReader r IndexShardCacheEntity cacheEntity = new IndexShardCacheEntity(shard); Supplier supplier = () -> { /* BytesStreamOutput allows to pass the expected size but by default uses - * BigArrays.PAGE_SIZE_IN_BYTES which is 16k. A common cached result ie. + * BigArrays.PAGE_SIZE_IN_BYTES which is 16k. A common cached result i.e. * a date histogram with 3 buckets is ~100byte so 16k might be very wasteful * since we don't shrink to the actual size once we are done serializing. * By passing 512 as the expected size we will resize the byte array in the stream diff --git a/server/src/main/java/org/elasticsearch/indices/analysis/AnalysisModule.java b/server/src/main/java/org/elasticsearch/indices/analysis/AnalysisModule.java index 60a2b1640ed5b..5ee4855463b9a 100644 --- a/server/src/main/java/org/elasticsearch/indices/analysis/AnalysisModule.java +++ b/server/src/main/java/org/elasticsearch/indices/analysis/AnalysisModule.java @@ -281,7 +281,7 @@ public interface AnalysisProvider { * @param name the name of the analysis component * @return a new provider instance * @throws IOException if an {@link IOException} occurs - * @throws IllegalArgumentException if the provider requires analysis settings ie. if {@link #requiresAnalysisSettings()} returns + * @throws IllegalArgumentException if the provider requires analysis settings i.e. 
if {@link #requiresAnalysisSettings()} returns * true */ default T get(Environment environment, String name) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java index 46f98275740ae..f6196e36ba2d3 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java @@ -237,7 +237,7 @@ private boolean isTargetSameHistory() { return targetHistoryUUID != null && targetHistoryUUID.equals(shard.getHistoryUUID()); } - static void runUnderPrimaryPermit(CancellableThreads.Interruptable runnable, String reason, + static void runUnderPrimaryPermit(CancellableThreads.Interruptible runnable, String reason, IndexShard primary, CancellableThreads cancellableThreads, Logger logger) { cancellableThreads.execute(() -> { CompletableFuture permit = new CompletableFuture<>(); @@ -563,7 +563,7 @@ protected SendSnapshotResult sendSnapshot(final long startingSeqNo, long require logger.trace("no translog operations to send"); } - final CancellableThreads.IOInterruptable sendBatch = () -> { + final CancellableThreads.IOInterruptible sendBatch = () -> { final long targetCheckpoint = recoveryTarget.indexTranslogOperations( operations, expectedTotalOps, maxSeenAutoIdTimestamp, maxSeqNoOfUpdatesOrDeletes); targetLocalCheckpoint.set(targetCheckpoint); diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java index 3a3a78941b1b7..609292c06e082 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java @@ -271,7 +271,7 @@ public void markAsDone() { if (finished.compareAndSet(false, true)) { assert 
tempFileNames.isEmpty() : "not all temporary files are renamed"; try { - // this might still throw an exception ie. if the shard is CLOSED due to some other event. + // this might still throw an exception i.e. if the shard is CLOSED due to some other event. // it's safer to decrement the reference in a try finally here. indexShard.postRecovery("peer recovery done"); } finally { diff --git a/server/src/main/java/org/elasticsearch/indices/store/IndicesStore.java b/server/src/main/java/org/elasticsearch/indices/store/IndicesStore.java index 7a07d8f62b229..535ebc7c9d78a 100644 --- a/server/src/main/java/org/elasticsearch/indices/store/IndicesStore.java +++ b/server/src/main/java/org/elasticsearch/indices/store/IndicesStore.java @@ -74,7 +74,7 @@ public class IndicesStore implements ClusterStateListener, Closeable { private static final Logger logger = LogManager.getLogger(IndicesStore.class); - // TODO this class can be foled into either IndicesService and partially into IndicesClusterStateService + // TODO this class can be folded into either IndicesService and partially into IndicesClusterStateService // there is no need for a separate public service public static final Setting INDICES_STORE_DELETE_SHARD_TIMEOUT = Setting.positiveTimeSetting("indices.store.delete.shard.timeout", new TimeValue(30, TimeUnit.SECONDS), diff --git a/server/src/main/java/org/elasticsearch/node/ResponseCollectorService.java b/server/src/main/java/org/elasticsearch/node/ResponseCollectorService.java index 4f9ec9488b430..72f6b7fa58a28 100644 --- a/server/src/main/java/org/elasticsearch/node/ResponseCollectorService.java +++ b/server/src/main/java/org/elasticsearch/node/ResponseCollectorService.java @@ -111,7 +111,7 @@ public Optional getNodeStatistics(final String nodeId) { public static class ComputedNodeStats implements Writeable { // We store timestamps with nanosecond precision, however, the // formula specifies milliseconds, therefore we need to convert - // the values so the times don't 
unduely weight the formula + // the values so the times don't unduly weight the formula private final double FACTOR = 1000000.0; private final int clientNum; diff --git a/server/src/main/java/org/elasticsearch/rest/action/RestBuilderListener.java b/server/src/main/java/org/elasticsearch/rest/action/RestBuilderListener.java index c460331afaa9d..076882f1bf4e7 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/RestBuilderListener.java +++ b/server/src/main/java/org/elasticsearch/rest/action/RestBuilderListener.java @@ -42,7 +42,7 @@ public final RestResponse buildResponse(Response response) throws Exception { } /** - * Builds a response to send back over the channel. Implementors should ensure that they close the provided {@link XContentBuilder} + * Builds a response to send back over the channel. Implementers should ensure that they close the provided {@link XContentBuilder} * using the {@link XContentBuilder#close()} method. */ public abstract RestResponse buildResponse(Response response, XContentBuilder builder) throws Exception; diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetRepositoriesAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetRepositoriesAction.java index ccb10b603d3b2..5d8c93ca4337c 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetRepositoriesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetRepositoriesAction.java @@ -51,7 +51,7 @@ public RestGetRepositoriesAction(Settings settings, RestController controller, S @Override public String getName() { - return "get_respositories_action"; + return "get_repositories_action"; } @Override diff --git a/server/src/main/java/org/elasticsearch/script/ClassPermission.java b/server/src/main/java/org/elasticsearch/script/ClassPermission.java index f636a0190c47f..33f55ae953350 100644 --- a/server/src/main/java/org/elasticsearch/script/ClassPermission.java +++ 
b/server/src/main/java/org/elasticsearch/script/ClassPermission.java @@ -76,7 +76,7 @@ public final class ClassPermission extends BasicPermission { public static final String STANDARD = "<>"; /** Typical set of classes for scripting: basic data types, math, dates, and simple collections */ - // this is the list from the old grovy sandbox impl (+ some things like String, Iterator, etc that were missing) + // this is the list from the old groovy sandbox impl (+ some things like String, Iterator, etc that were missing) public static final Set STANDARD_CLASSES = Collections.unmodifiableSet(new HashSet<>(Arrays.asList( // jdk classes java.lang.Boolean.class.getName(), diff --git a/server/src/main/java/org/elasticsearch/search/SearchModule.java b/server/src/main/java/org/elasticsearch/search/SearchModule.java index 2531685b94557..8be02a86dd4b5 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchModule.java +++ b/server/src/main/java/org/elasticsearch/search/SearchModule.java @@ -466,7 +466,7 @@ private void registerPipelineAggregations(List plugins) { MaxBucketPipelineAggregationBuilder::new, MaxBucketPipelineAggregator::new, MaxBucketPipelineAggregationBuilder.PARSER) - // This bucket is used by many pipeline aggreations. + // This bucket is used by many pipeline aggregations. .addResultReader(InternalBucketMetricValue.NAME, InternalBucketMetricValue::new)); registerPipelineAggregation(new PipelineAggregationSpec( MinBucketPipelineAggregationBuilder.NAME, @@ -479,7 +479,7 @@ private void registerPipelineAggregations(List plugins) { AvgBucketPipelineAggregationBuilder::new, AvgBucketPipelineAggregator::new, AvgBucketPipelineAggregationBuilder.PARSER) - // This bucket is used by many pipeline aggreations. + // This bucket is used by many pipeline aggregations. 
.addResultReader(InternalSimpleValue.NAME, InternalSimpleValue::new)); registerPipelineAggregation(new PipelineAggregationSpec( SumBucketPipelineAggregationBuilder.NAME, diff --git a/server/src/main/java/org/elasticsearch/search/SearchService.java b/server/src/main/java/org/elasticsearch/search/SearchService.java index 500e70a65b486..24cc4b45b74fe 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchService.java +++ b/server/src/main/java/org/elasticsearch/search/SearchService.java @@ -691,7 +691,7 @@ private DefaultSearchContext createSearchContext(ShardSearchRequest request, Tim // during rewrite and normalized / evaluate templates etc. QueryShardContext context = new QueryShardContext(searchContext.getQueryShardContext()); Rewriteable.rewrite(request.getRewriteable(), context, assertAsyncActions); - assert searchContext.getQueryShardContext().isCachable(); + assert searchContext.getQueryShardContext().isCacheable(); success = true; } finally { if (success == false) { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorBase.java b/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorBase.java index 568a692ba61c0..ff3c82f6e8c91 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorBase.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorBase.java @@ -73,7 +73,7 @@ protected AggregatorBase(String name, AggregatorFactories factories, SearchConte this.parent = parent; this.context = context; this.breakerService = context.bigArrays().breakerService(); - assert factories != null : "sub-factories provided to BucketAggregator must not be null, use AggragatorFactories.EMPTY instead"; + assert factories != null : "sub-factories provided to BucketAggregator must not be null, use AggregatorFactories.EMPTY instead"; this.subAggregators = factories.createSubAggregators(this); context.addReleasable(this, Lifetime.PHASE); // Register a safeguard to highlight any 
invalid construction logic (call to this constructor without subsequent preCollection call) diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java b/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java index d6eb73514d9c3..442f04b26ffa1 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java @@ -191,7 +191,7 @@ public List createPipelineAggregators() throws IOException { public Aggregator[] createSubAggregators(Aggregator parent) throws IOException { Aggregator[] aggregators = new Aggregator[countAggregators()]; for (int i = 0; i < factories.length; ++i) { - // TODO: sometimes even sub aggregations always get called with bucket 0, eg. if + // TODO: sometimes even sub aggregations always get called with bucket 0, e.g. if // you have a terms agg under a top-level filter agg. We should have a way to // propagate the fact that only bucket 0 will be collected with single-bucket // aggs @@ -347,16 +347,16 @@ private List resolvePipelineAggregatorOrder( for (AggregationBuilder aggBuilder : aggregationBuilders) { aggBuildersMap.put(aggBuilder.name, aggBuilder); } - List orderedPipelineAggregatorrs = new LinkedList<>(); + List orderedPipelineAggregators = new LinkedList<>(); List unmarkedBuilders = new ArrayList<>(pipelineAggregatorBuilders); Collection temporarilyMarked = new HashSet<>(); while (!unmarkedBuilders.isEmpty()) { PipelineAggregationBuilder builder = unmarkedBuilders.get(0); builder.validate(parent, aggregationBuilders, pipelineAggregatorBuilders); - resolvePipelineAggregatorOrder(aggBuildersMap, pipelineAggregatorBuildersMap, orderedPipelineAggregatorrs, unmarkedBuilders, + resolvePipelineAggregatorOrder(aggBuildersMap, pipelineAggregatorBuildersMap, orderedPipelineAggregators, unmarkedBuilders, temporarilyMarked, builder); } - return 
orderedPipelineAggregatorrs; + return orderedPipelineAggregators; } private void resolvePipelineAggregatorOrder(Map aggBuildersMap, diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BucketsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BucketsAggregator.java index 71dacc698bee6..dc36f31883227 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BucketsAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BucketsAggregator.java @@ -114,7 +114,7 @@ public final void incrementBucketDocCount(long bucketOrd, int inc) { */ public final int bucketDocCount(long bucketOrd) { if (bucketOrd >= docCounts.size()) { - // This may happen eg. if no document in the highest buckets is accepted by a sub aggregator. + // This may happen e.g. if no document in the highest buckets is accepted by a sub aggregator. // For example, if there is a long terms agg on 3 terms 1,2,3 with a sub filter aggregator and if no document with 3 as a value // matches the filter, then the filter will never collect bucket ord 3. However, the long terms agg will call // bucketAggregations(3) on the filter aggregator anyway to build sub-aggregations. diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregatorFactory.java index 04dd8d3a53cea..188202873ca88 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregatorFactory.java @@ -52,7 +52,7 @@ public FilterAggregatorFactory(String name, QueryBuilder filterBuilder, SearchCo * if the aggregation collects documents reducing the overhead of the * aggregation in the case where no documents are collected. 
* - * Note that as aggregations are initialsed and executed in a serial manner, + * Note that as aggregations are initialised and executed in a serial manner, * no concurrency considerations are necessary here. */ public Weight getWeight() { @@ -61,7 +61,7 @@ public Weight getWeight() { try { weight = contextSearcher.createWeight(contextSearcher.rewrite(filter), ScoreMode.COMPLETE_NO_SCORES, 1f); } catch (IOException e) { - throw new AggregationInitializationException("Failed to initialse filter", e); + throw new AggregationInitializationException("Failed to initialise filter", e); } } return weight; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregatorFactory.java index cc299765621aa..d74cd2ba89ad1 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregatorFactory.java @@ -66,7 +66,7 @@ public FiltersAggregatorFactory(String name, List filters, boolean * created if the aggregation collects documents reducing the overhead of * the aggregation in the case where no documents are collected. * - * Note that as aggregations are initialsed and executed in a serial manner, + * Note that as aggregations are initialised and executed in a serial manner, * no concurrency considerations are necessary here. 
*/ public Weight[] getWeights() { @@ -78,7 +78,7 @@ public Weight[] getWeights() { this.weights[i] = contextSearcher.createWeight(contextSearcher.rewrite(filters[i]), ScoreMode.COMPLETE_NO_SCORES, 1); } } catch (IOException e) { - throw new AggregationInitializationException("Failed to initialse filters for aggregation [" + name() + "]", e); + throw new AggregationInitializationException("Failed to initialise filters for aggregation [" + name() + "]", e); } } return weights; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/HyperLogLogPlusPlus.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/HyperLogLogPlusPlus.java index 2bd30a677a0af..7b532bcf76c35 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/HyperLogLogPlusPlus.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/HyperLogLogPlusPlus.java @@ -1033,11 +1033,11 @@ private Object getComparableData(long bucket) { Map values = new HashMap<>(); for (long i = 0; i < runLens.size(); i++) { byte runLength = runLens.get((bucket << p) + i); - Integer numOccurances = values.get(runLength); - if (numOccurances == null) { + Integer numOccurrences = values.get(runLength); + if (numOccurrences == null) { values.put(runLength, 1); } else { - values.put(runLength, numOccurances + 1); + values.put(runLength, numOccurrences + 1); } } return values; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregator.java index c017eb4a5e3bc..46dcc08457bd3 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregator.java @@ -100,7 +100,7 @@ public ScoreMode scoreMode() { public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, LeafBucketCollector sub) throws 
IOException { // Create leaf collectors here instead of at the aggregator level. Otherwise in case this collector get invoked // when post collecting then we have already replaced the leaf readers on the aggregator level have already been - // replaced with the next leaf readers and then post collection pushes docids of the previous segement, which + // replaced with the next leaf readers and then post collection pushes docids of the previous segment, which // then causes assertions to trip or incorrect top docs to be computed. final LongObjectHashMap leafCollectors = new LongObjectHashMap<>(1); return new LeafBucketCollectorBase(sub, null) { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/AggregationPath.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/AggregationPath.java index c7474fb800f25..bdfbbe1ea6cb4 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/AggregationPath.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/AggregationPath.java @@ -334,7 +334,7 @@ public void validate(Aggregator root) throws AggregationExecutionException { "Either drop the key (a la \"" + lastToken.name + "\") or change it to \"doc_count\" (a la \"" + lastToken.name + ".doc_count\")"); } - return; // perfectly valid to sort on single-bucket aggregation (will be sored on its doc_count) + return; // perfectly valid to sort on single-bucket aggregation (will be sorted on its doc_count) } if (aggregator instanceof NumericMetricsAggregator.SingleValue) { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceAggregationBuilder.java index acc0d2ee20bcb..70df615475e81 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceAggregationBuilder.java +++ 
b/server/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceAggregationBuilder.java @@ -39,7 +39,7 @@ * Similar to {@link ValuesSourceAggregationBuilder}, except it references multiple ValuesSources (e.g. so that an aggregation * can pull values from multiple fields). * - * A limitation of this class is that all the ValuesSource's being refereenced must be of the same type. + * A limitation of this class is that all the ValuesSource's being referenced must be of the same type. */ public abstract class MultiValuesSourceAggregationBuilder> extends AbstractAggregationBuilder { diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AbstractHighlighterBuilder.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AbstractHighlighterBuilder.java index 161ca9279f094..13c1c08ff82b7 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AbstractHighlighterBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AbstractHighlighterBuilder.java @@ -188,9 +188,9 @@ public final void writeTo(StreamOutput out) throws IOException { out.writeOptionalBoolean(forceSource); out.writeOptionalWriteable(boundaryScannerType); out.writeOptionalVInt(boundaryMaxScan); - boolean hasBounaryChars = boundaryChars != null; - out.writeBoolean(hasBounaryChars); - if (hasBounaryChars) { + boolean hasBoundaryChars = boundaryChars != null; + out.writeBoolean(hasBoundaryChars); + if (hasBoundaryChars) { out.writeString(String.valueOf(boundaryChars)); } boolean hasBoundaryScannerLocale = boundaryScannerLocale != null; diff --git a/server/src/main/java/org/elasticsearch/search/internal/SearchContext.java b/server/src/main/java/org/elasticsearch/search/internal/SearchContext.java index 70a52c39ee110..a0292d2fccb18 100644 --- a/server/src/main/java/org/elasticsearch/search/internal/SearchContext.java +++ 
b/server/src/main/java/org/elasticsearch/search/internal/SearchContext.java @@ -74,7 +74,7 @@ * state from one query / fetch phase to another. * * This class also implements {@link RefCounted} since in some situations like in {@link org.elasticsearch.search.SearchService} - * a SearchContext can be closed concurrently due to independent events ie. when an index gets removed. To prevent accessing closed + * a SearchContext can be closed concurrently due to independent events i.e. when an index gets removed. To prevent accessing closed * IndexReader / IndexSearcher instances the SearchContext can be guarded by a reference count and fail if it's been closed by * an external event. */ diff --git a/server/src/main/java/org/elasticsearch/search/rescore/QueryRescorer.java b/server/src/main/java/org/elasticsearch/search/rescore/QueryRescorer.java index d70ec62c7af60..c5e921660a16c 100644 --- a/server/src/main/java/org/elasticsearch/search/rescore/QueryRescorer.java +++ b/server/src/main/java/org/elasticsearch/search/rescore/QueryRescorer.java @@ -149,7 +149,7 @@ private TopDocs combine(TopDocs in, TopDocs resorted, QueryRescoreContext ctx) { } // TODO: this is wrong, i.e. we are comparing apples and oranges at this point. 
It would be better if we always rescored all - // incoming first pass hits, instead of allowing recoring of just the top subset: + // incoming first pass hits, instead of allowing rescoring of just the top subset: Arrays.sort(in.scoreDocs, SCORE_DOC_COMPARATOR); } return in; diff --git a/server/src/main/java/org/elasticsearch/search/rescore/RescoreContext.java b/server/src/main/java/org/elasticsearch/search/rescore/RescoreContext.java index 2401b9ff32900..ec73d8e6537a2 100644 --- a/server/src/main/java/org/elasticsearch/search/rescore/RescoreContext.java +++ b/server/src/main/java/org/elasticsearch/search/rescore/RescoreContext.java @@ -29,7 +29,7 @@ public class RescoreContext { private final int windowSize; private final Rescorer rescorer; - private Set resroredDocs; //doc Ids for which rescoring was applied + private Set rescoredDocs; //doc Ids for which rescoring was applied /** * Build the context. @@ -55,10 +55,10 @@ public int getWindowSize() { } public void setRescoredDocs(Set docIds) { - resroredDocs = docIds; + rescoredDocs = docIds; } public boolean isRescored(int docId) { - return resroredDocs.contains(docId); + return rescoredDocs.contains(docId); } } diff --git a/server/src/main/java/org/elasticsearch/search/suggest/phrase/CandidateScorer.java b/server/src/main/java/org/elasticsearch/search/suggest/phrase/CandidateScorer.java index d93ef42ee4ff7..2d3fdfc8802b1 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/phrase/CandidateScorer.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/phrase/CandidateScorer.java @@ -36,7 +36,7 @@ final class CandidateScorer { } - public Correction[] findBestCandiates(CandidateSet[] sets, float errorFraction, double cutoffScore) throws IOException { + public Correction[] findBestCandidates(CandidateSet[] sets, float errorFraction, double cutoffScore) throws IOException { if (sets.length == 0) { return Correction.EMPTY; } @@ -46,13 +46,13 @@ protected boolean lessThan(Correction a, Correction 
b) { return a.compareTo(b) < 0; } }; - int numMissspellings = 1; + int numMisspellings = 1; if (errorFraction >= 1.0) { - numMissspellings = (int) errorFraction; + numMisspellings = (int) errorFraction; } else { - numMissspellings = Math.round(errorFraction * sets.length); + numMisspellings = Math.round(errorFraction * sets.length); } - findCandidates(sets, new Candidate[sets.length], 0, Math.max(1, numMissspellings), corrections, cutoffScore, 0.0); + findCandidates(sets, new Candidate[sets.length], 0, Math.max(1, numMisspellings), corrections, cutoffScore, 0.0); Correction[] result = new Correction[corrections.size()]; for (int i = result.length - 1; i >= 0; i--) { result[i] = corrections.pop(); @@ -62,26 +62,26 @@ protected boolean lessThan(Correction a, Correction b) { } - public void findCandidates(CandidateSet[] candidates, Candidate[] path, int ord, int numMissspellingsLeft, + public void findCandidates(CandidateSet[] candidates, Candidate[] path, int ord, int numMisspellingsLeft, PriorityQueue corrections, double cutoffScore, final double pathScore) throws IOException { CandidateSet current = candidates[ord]; if (ord == candidates.length - 1) { path[ord] = current.originalTerm; updateTop(candidates, path, corrections, cutoffScore, pathScore + scorer.score(path, candidates, ord, gramSize)); - if (numMissspellingsLeft > 0) { + if (numMisspellingsLeft > 0) { for (int i = 0; i < current.candidates.length; i++) { path[ord] = current.candidates[i]; updateTop(candidates, path, corrections, cutoffScore, pathScore + scorer.score(path, candidates, ord, gramSize)); } } } else { - if (numMissspellingsLeft > 0) { + if (numMisspellingsLeft > 0) { path[ord] = current.originalTerm; - findCandidates(candidates, path, ord + 1, numMissspellingsLeft, corrections, cutoffScore, + findCandidates(candidates, path, ord + 1, numMisspellingsLeft, corrections, cutoffScore, pathScore + scorer.score(path, candidates, ord, gramSize)); for (int i = 0; i < current.candidates.length; i++) { 
path[ord] = current.candidates[i]; - findCandidates(candidates, path, ord + 1, numMissspellingsLeft - 1, corrections, cutoffScore, + findCandidates(candidates, path, ord + 1, numMisspellingsLeft - 1, corrections, cutoffScore, pathScore + scorer.score(path, candidates, ord, gramSize)); } } else { diff --git a/server/src/main/java/org/elasticsearch/search/suggest/phrase/Laplace.java b/server/src/main/java/org/elasticsearch/search/suggest/phrase/Laplace.java index c4a6f15e03ced..4ca2483c6aed5 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/phrase/Laplace.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/phrase/Laplace.java @@ -119,7 +119,7 @@ public static SmoothingModel fromXContent(XContentParser parser) throws IOExcept @Override public WordScorerFactory buildWordScorerFactory() { - return (IndexReader reader, Terms terms, String field, double realWordLikelyhood, BytesRef separator) - -> new LaplaceScorer(reader, terms, field, realWordLikelyhood, separator, alpha); + return (IndexReader reader, Terms terms, String field, double realWordLikelihood, BytesRef separator) + -> new LaplaceScorer(reader, terms, field, realWordLikelihood, separator, alpha); } } diff --git a/server/src/main/java/org/elasticsearch/search/suggest/phrase/LaplaceScorer.java b/server/src/main/java/org/elasticsearch/search/suggest/phrase/LaplaceScorer.java index 52157a0fe8bde..2dc5803dfb27b 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/phrase/LaplaceScorer.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/phrase/LaplaceScorer.java @@ -29,8 +29,8 @@ final class LaplaceScorer extends WordScorer { private double alpha; LaplaceScorer(IndexReader reader, Terms terms, String field, - double realWordLikelyhood, BytesRef separator, double alpha) throws IOException { - super(reader, terms, field, realWordLikelyhood, separator); + double realWordLikelihood, BytesRef separator, double alpha) throws IOException { + super(reader, terms, 
field, realWordLikelihood, separator); this.alpha = alpha; } @@ -40,7 +40,7 @@ final class LaplaceScorer extends WordScorer { @Override protected double scoreUnigram(Candidate word) throws IOException { - return (alpha + frequency(word.term)) / (vocabluarySize + alpha * numTerms); + return (alpha + frequency(word.term)) / (vocabularySize + alpha * numTerms); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/suggest/phrase/LinearInterpolatingScorer.java b/server/src/main/java/org/elasticsearch/search/suggest/phrase/LinearInterpolatingScorer.java index b0c9552f8a8d6..aca226ea41671 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/phrase/LinearInterpolatingScorer.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/phrase/LinearInterpolatingScorer.java @@ -32,9 +32,9 @@ public final class LinearInterpolatingScorer extends WordScorer { private final double bigramLambda; private final double trigramLambda; - public LinearInterpolatingScorer(IndexReader reader, Terms terms, String field, double realWordLikelyhood, BytesRef separator, + public LinearInterpolatingScorer(IndexReader reader, Terms terms, String field, double realWordLikelihood, BytesRef separator, double trigramLambda, double bigramLambda, double unigramLambda) throws IOException { - super(reader, terms, field, realWordLikelyhood, separator); + super(reader, terms, field, realWordLikelihood, separator); double sum = unigramLambda + bigramLambda + trigramLambda; this.unigramLambda = unigramLambda / sum; this.bigramLambda = bigramLambda / sum; diff --git a/server/src/main/java/org/elasticsearch/search/suggest/phrase/LinearInterpolation.java b/server/src/main/java/org/elasticsearch/search/suggest/phrase/LinearInterpolation.java index e609be1d77c18..d38b34f50eeb7 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/phrase/LinearInterpolation.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/phrase/LinearInterpolation.java @@ -168,8 
+168,8 @@ public static LinearInterpolation fromXContent(XContentParser parser) throws IOE @Override public WordScorerFactory buildWordScorerFactory() { - return (IndexReader reader, Terms terms, String field, double realWordLikelyhood, BytesRef separator) -> - new LinearInterpolatingScorer(reader, terms, field, realWordLikelyhood, separator, trigramLambda, bigramLambda, + return (IndexReader reader, Terms terms, String field, double realWordLikelihood, BytesRef separator) -> + new LinearInterpolatingScorer(reader, terms, field, realWordLikelihood, separator, trigramLambda, bigramLambda, unigramLambda); } } diff --git a/server/src/main/java/org/elasticsearch/search/suggest/phrase/NoisyChannelSpellChecker.java b/server/src/main/java/org/elasticsearch/search/suggest/phrase/NoisyChannelSpellChecker.java index 635fa64c59b53..e92b8565ecd52 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/phrase/NoisyChannelSpellChecker.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/phrase/NoisyChannelSpellChecker.java @@ -38,14 +38,14 @@ //TODO public for tests public final class NoisyChannelSpellChecker { - public static final double REAL_WORD_LIKELYHOOD = 0.95d; + public static final double REAL_WORD_LIKELIHOOD = 0.95d; public static final int DEFAULT_TOKEN_LIMIT = 10; private final double realWordLikelihood; private final boolean requireUnigram; private final int tokenLimit; public NoisyChannelSpellChecker() { - this(REAL_WORD_LIKELYHOOD); + this(REAL_WORD_LIKELIHOOD); } public NoisyChannelSpellChecker(double nonErrorLikelihood) { @@ -126,7 +126,7 @@ public void end() { double inputPhraseScore = scorer.score(candidates, candidateSets); cutoffScore = inputPhraseScore * confidence; } - Correction[] bestCandidates = scorer.findBestCandiates(candidateSets, maxErrors, cutoffScore); + Correction[] bestCandidates = scorer.findBestCandidates(candidateSets, maxErrors, cutoffScore); return new Result(bestCandidates, cutoffScore); } diff --git 
a/server/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggester.java b/server/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggester.java index 10112ad2f43dd..413afd155d45b 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggester.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggester.java @@ -69,7 +69,7 @@ private PhraseSuggester() {} @Override public Suggestion> innerExecute(String name, PhraseSuggestionContext suggestion, IndexSearcher searcher, CharsRefBuilder spare) throws IOException { - double realWordErrorLikelihood = suggestion.realworldErrorLikelyhood(); + double realWordErrorLikelihood = suggestion.realworldErrorLikelihood(); final PhraseSuggestion response = new PhraseSuggestion(name, suggestion.getSize()); final IndexReader indexReader = searcher.getIndexReader(); List generators = suggestion.generators(); diff --git a/server/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionContext.java b/server/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionContext.java index 4fd37d01ca5ee..84987f30e87f7 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionContext.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionContext.java @@ -40,8 +40,8 @@ class PhraseSuggestionContext extends SuggestionContext { static final float DEFAULT_RWE_ERRORLIKELIHOOD = 0.95f; static final float DEFAULT_MAX_ERRORS = 0.5f; static final String DEFAULT_SEPARATOR = " "; - static final WordScorer.WordScorerFactory DEFAULT_SCORER = (IndexReader reader, Terms terms, String field, double realWordLikelyhood, - BytesRef separator) -> new StupidBackoffScorer(reader, terms, field, realWordLikelyhood, separator, 0.4f); + static final WordScorer.WordScorerFactory DEFAULT_SCORER = (IndexReader reader, Terms terms, String field, double realWordLikelihood, + BytesRef separator) -> new 
StupidBackoffScorer(reader, terms, field, realWordLikelihood, separator, 0.4f); private float maxErrors = DEFAULT_MAX_ERRORS; private BytesRef separator = new BytesRef(DEFAULT_SEPARATOR); @@ -78,7 +78,7 @@ public void setSeparator(BytesRef separator) { this.separator = separator; } - public Float realworldErrorLikelyhood() { + public Float realworldErrorLikelihood() { return realworldErrorLikelihood; } diff --git a/server/src/main/java/org/elasticsearch/search/suggest/phrase/StupidBackoff.java b/server/src/main/java/org/elasticsearch/search/suggest/phrase/StupidBackoff.java index c7edde8bbaf76..29d03890b2791 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/phrase/StupidBackoff.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/phrase/StupidBackoff.java @@ -122,7 +122,7 @@ public static SmoothingModel fromXContent(XContentParser parser) throws IOExcept @Override public WordScorerFactory buildWordScorerFactory() { - return (IndexReader reader, Terms terms, String field, double realWordLikelyhood, BytesRef separator) - -> new StupidBackoffScorer(reader, terms, field, realWordLikelyhood, separator, discount); + return (IndexReader reader, Terms terms, String field, double realWordLikelihood, BytesRef separator) + -> new StupidBackoffScorer(reader, terms, field, realWordLikelihood, separator, discount); } } diff --git a/server/src/main/java/org/elasticsearch/search/suggest/phrase/StupidBackoffScorer.java b/server/src/main/java/org/elasticsearch/search/suggest/phrase/StupidBackoffScorer.java index d6862f384bebf..54493acf8a592 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/phrase/StupidBackoffScorer.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/phrase/StupidBackoffScorer.java @@ -29,8 +29,8 @@ class StupidBackoffScorer extends WordScorer { private final double discount; StupidBackoffScorer(IndexReader reader, Terms terms,String field, - double realWordLikelyhood, BytesRef separator, double discount) 
throws IOException { - super(reader, terms, field, realWordLikelyhood, separator); + double realWordLikelihood, BytesRef separator, double discount) throws IOException { + super(reader, terms, field, realWordLikelihood, separator); this.discount = discount; } diff --git a/server/src/main/java/org/elasticsearch/search/suggest/phrase/WordScorer.java b/server/src/main/java/org/elasticsearch/search/suggest/phrase/WordScorer.java index b13f33f76394b..1a9f2ddb90bba 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/phrase/WordScorer.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/phrase/WordScorer.java @@ -36,26 +36,26 @@ public abstract class WordScorer { protected final IndexReader reader; protected final String field; protected final Terms terms; - protected final long vocabluarySize; - protected final double realWordLikelyhood; + protected final long vocabularySize; + protected final double realWordLikelihood; protected final BytesRefBuilder spare = new BytesRefBuilder(); protected final BytesRef separator; protected final long numTerms; private final TermsEnum termsEnum; private final boolean useTotalTermFreq; - public WordScorer(IndexReader reader, String field, double realWordLikelyHood, BytesRef separator) throws IOException { - this(reader, MultiTerms.getTerms(reader, field), field, realWordLikelyHood, separator); + public WordScorer(IndexReader reader, String field, double realWordLikelihood, BytesRef separator) throws IOException { + this(reader, MultiTerms.getTerms(reader, field), field, realWordLikelihood, separator); } - public WordScorer(IndexReader reader, Terms terms, String field, double realWordLikelyHood, BytesRef separator) throws IOException { + public WordScorer(IndexReader reader, Terms terms, String field, double realWordLikelihood, BytesRef separator) throws IOException { this.field = field; if (terms == null) { throw new IllegalArgumentException("Field: [" + field + "] does not exist"); } this.terms = terms; 
final long vocSize = terms.getSumTotalTermFreq(); - this.vocabluarySize = vocSize == -1 ? reader.maxDoc() : vocSize; + this.vocabularySize = vocSize == -1 ? reader.maxDoc() : vocSize; this.useTotalTermFreq = vocSize != -1; // terms.size() might be -1 if it's a MultiTerms instance. In that case, // use reader.maxDoc() as an approximation. This also protects from @@ -65,7 +65,7 @@ public WordScorer(IndexReader reader, Terms terms, String field, double realWord this.termsEnum = new FreqTermsEnum(reader, field, !useTotalTermFreq, useTotalTermFreq, null, BigArrays.NON_RECYCLING_INSTANCE); // non recycling for now this.reader = reader; - this.realWordLikelyhood = realWordLikelyHood; + this.realWordLikelihood = realWordLikelihood; this.separator = separator; } @@ -78,7 +78,7 @@ public long frequency(BytesRef term) throws IOException { protected double channelScore(Candidate candidate, Candidate original) throws IOException { if (candidate.stringDistance == 1.0d) { - return realWordLikelyhood; + return realWordLikelihood; } return candidate.stringDistance; } @@ -94,7 +94,7 @@ public double score(Candidate[] path, CandidateSet[] candidateSet, int at, int g } protected double scoreUnigram(Candidate word) throws IOException { - return (1.0 + frequency(word.term)) / (vocabluarySize + numTerms); + return (1.0 + frequency(word.term)) / (vocabularySize + numTerms); } protected double scoreBigram(Candidate word, Candidate w_1) throws IOException { @@ -117,6 +117,6 @@ public static BytesRef join(BytesRef separator, BytesRefBuilder result, BytesRef public interface WordScorerFactory { WordScorer newScorer(IndexReader reader, Terms terms, - String field, double realWordLikelyhood, BytesRef separator) throws IOException; + String field, double realWordLikelihood, BytesRef separator) throws IOException; } } diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java index 
8c505d20d17ff..566719c2818ef 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java @@ -398,7 +398,7 @@ public ClusterState execute(ClusterState currentState) { } if (entry.state() != State.ABORTED) { - // Replace the snapshot that was just intialized + // Replace the snapshot that was just initialized ImmutableOpenMap shards = shards(currentState, entry.indices()); if (!partial) { diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java index 237e73e572ae3..fcbfe7e0966e9 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java @@ -338,7 +338,7 @@ private static int parsePort(String remoteHost) { } private static int indexOfPortSeparator(String remoteHost) { - int portSeparator = remoteHost.lastIndexOf(':'); // in case we have a IPv6 address ie. [::1]:9300 + int portSeparator = remoteHost.lastIndexOf(':'); // in case we have a IPv6 address i.e. 
[::1]:9300 if (portSeparator == -1 || portSeparator == remoteHost.length()) { throw new IllegalArgumentException("remote hosts need to be configured as [host:port], found [" + remoteHost + "] instead"); } diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java index 7ea55925262ff..75a1079b15f8c 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java @@ -146,7 +146,7 @@ private static DiscoveryNode maybeAddProxyAddress(String proxyAddress, Discovery if (proxyAddress == null || proxyAddress.isEmpty()) { return node; } else { - // resovle proxy address lazy here + // resolve proxy address lazy here InetSocketAddress proxyInetAddress = RemoteClusterAware.parseSeedAddress(proxyAddress); return new DiscoveryNode(node.getName(), node.getId(), node.getEphemeralId(), node.getHostName(), node .getHostAddress(), new TransportAddress(proxyInetAddress), node.getAttributes(), node.getRoles(), node.getVersion()); diff --git a/server/src/main/java/org/elasticsearch/transport/Transport.java b/server/src/main/java/org/elasticsearch/transport/Transport.java index 4a8a061602a52..db6ac01152430 100644 --- a/server/src/main/java/org/elasticsearch/transport/Transport.java +++ b/server/src/main/java/org/elasticsearch/transport/Transport.java @@ -74,7 +74,7 @@ public interface Transport extends LifecycleComponent { TransportAddress[] addressesFromString(String address, int perAddressLimit) throws UnknownHostException; /** - * Returns a list of all local adresses for this transport + * Returns a list of all local addresses for this transport */ List getLocalAddresses(); diff --git a/server/src/main/java/org/elasticsearch/transport/TransportMessageListener.java b/server/src/main/java/org/elasticsearch/transport/TransportMessageListener.java index 
bc57c62ca8d70..d993f2e343a6e 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportMessageListener.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportMessageListener.java @@ -59,8 +59,8 @@ default void onRequestSent(DiscoveryNode node, long requestId, String action, Tr /** * Called for every response received - * @param requestId the request id for this reponse - * @param context the response context or null if the context was already processed ie. due to a timeout. + * @param requestId the request id for this response + * @param context the response context or null if the context was already processed i.e. due to a timeout. */ default void onResponseReceived(long requestId, Transport.ResponseContext context) {} } diff --git a/server/src/main/java/org/elasticsearch/transport/TransportService.java b/server/src/main/java/org/elasticsearch/transport/TransportService.java index b5e97ac3ae6cb..8e08f85e8b8a1 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportService.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportService.java @@ -606,7 +606,7 @@ private void sendRequestInternal(final Transport.C Supplier storedContextSupplier = threadPool.getThreadContext().newRestorableContext(true); ContextRestoreResponseHandler responseHandler = new ContextRestoreResponseHandler<>(storedContextSupplier, handler); - // TODO we can probably fold this entire request ID dance into connection.sendReqeust but it will be a bigger refactoring + // TODO we can probably fold this entire request ID dance into connection.sendRequest but it will be a bigger refactoring final long requestId = responseHandlers.add(new Transport.ResponseContext<>(responseHandler, connection, action)); final TimeoutHandler timeoutHandler; if (options.timeout() != null) { diff --git a/server/src/test/java/org/apache/lucene/queries/SpanMatchNoDocsQueryTests.java b/server/src/test/java/org/apache/lucene/queries/SpanMatchNoDocsQueryTests.java 
index 6187fc1f7f6d9..c0891b4c755a0 100644 --- a/server/src/test/java/org/apache/lucene/queries/SpanMatchNoDocsQueryTests.java +++ b/server/src/test/java/org/apache/lucene/queries/SpanMatchNoDocsQueryTests.java @@ -60,7 +60,7 @@ public void testQuery() throws Exception { IndexReader ir = DirectoryReader.open(iw); IndexSearcher searcher = new IndexSearcher(ir); - Query query = new SpanMatchNoDocsQuery("unkwown", "field not found"); + Query query = new SpanMatchNoDocsQuery("unknown", "field not found"); assertEquals(searcher.count(query), 0); ScoreDoc[] hits; diff --git a/server/src/test/java/org/elasticsearch/VersionTests.java b/server/src/test/java/org/elasticsearch/VersionTests.java index 08db8dfaf2100..43c9b6b948c56 100644 --- a/server/src/test/java/org/elasticsearch/VersionTests.java +++ b/server/src/test/java/org/elasticsearch/VersionTests.java @@ -184,7 +184,7 @@ public void testMinCompatVersion() { assertThat(Version.V_6_5_0.minimumCompatibilityVersion(), equalTo(major56x)); assertThat(Version.V_6_3_1.minimumCompatibilityVersion(), equalTo(major56x)); - // from 7.0 on we are supporting the latest minor of the previous major... this might fail once we add a new version ie. 5.x is + // from 7.0 on we are supporting the latest minor of the previous major... this might fail once we add a new version i.e. 
5.x is // released since we need to bump the supported minor in Version#minimumCompatibilityVersion() Version lastVersion = Version.V_6_7_0; // TODO: remove this once min compat version is a constant instead of method assertEquals(lastVersion.major, Version.V_7_0_0.minimumCompatibilityVersion().major); @@ -339,7 +339,7 @@ public void testLuceneVersionIsSameOnMinorRelease() { } public static void assertUnknownVersion(Version version) { - assertFalse("Version " + version + " has been releaed don't use a new instance of this version", + assertFalse("Version " + version + " has been released don't use a new instance of this version", VersionUtils.allReleasedVersions().contains(version)); } diff --git a/server/src/test/java/org/elasticsearch/action/ActionListenerTests.java b/server/src/test/java/org/elasticsearch/action/ActionListenerTests.java index 6414c81058bec..13f40fe560c97 100644 --- a/server/src/test/java/org/elasticsearch/action/ActionListenerTests.java +++ b/server/src/test/java/org/elasticsearch/action/ActionListenerTests.java @@ -133,7 +133,7 @@ public void testOnFailure() { try { ActionListener.onFailure(listeners, new Exception("booom")); - assertTrue("unexpected succces listener to fail: " + listenerToFail, listenerToFail == -1); + assertTrue("unexpected success listener to fail: " + listenerToFail, listenerToFail == -1); } catch (RuntimeException ex) { assertTrue("listener to fail: " + listenerToFail, listenerToFail >= 0); assertNotNull(ex.getCause()); diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TaskStorageRetryIT.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TaskStorageRetryIT.java index a2e645b457a8a..ef08aa2d421a1 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TaskStorageRetryIT.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TaskStorageRetryIT.java @@ -117,7 +117,7 @@ public void testRetry() throws Exception { 
*/ private NodeClient nodeClient() { /* - * Luckilly our test infrastructure already returns it, but we can't + * Luckily our test infrastructure already returns it, but we can't * change the return type in the superclass because it is wrapped other * places. */ diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java index a2147544a87db..bbaa29c8a87fa 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java @@ -723,7 +723,7 @@ public void testTasksWaitForAllTask() throws Exception { assertThat(response.getTasks().size(), greaterThanOrEqualTo(1)); } - public void testTaskStoringSuccesfulResult() throws Exception { + public void testTaskStoringSuccessfulResult() throws Exception { registerTaskManageListeners(TestTaskPlugin.TestTaskAction.NAME); // we need this to get task id of the process // Start non-blocking test task diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushUnitTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushUnitTests.java index f6ca1c4f742a0..29a65bf3a1557 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushUnitTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushUnitTests.java @@ -140,7 +140,7 @@ protected TestPlan createTestPlan() { final Map> indicesResults = new HashMap<>(); final int indexCount = randomIntBetween(1, 10); int totalShards = 0; - int totalSuccesful = 0; + int totalSuccessful = 0; int totalFailed = 0; for (int i = 0; i < indexCount; i++) { final String index = "index_" + i; @@ -180,10 +180,10 @@ protected TestPlan createTestPlan() { testPlan.expectedFailuresPerIndex.put(index, failures); totalFailed += failed; totalShards += 
shards * (replicas + 1); - totalSuccesful += successful; + totalSuccessful += successful; } testPlan.result = new SyncedFlushResponse(indicesResults); - testPlan.totalCounts = new SyncedFlushResponse.ShardCounts(totalShards, totalSuccesful, totalFailed); + testPlan.totalCounts = new SyncedFlushResponse.ShardCounts(totalShards, totalSuccessful, totalFailed); return testPlan; } diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverActionTests.java index cbc9499cda327..ec3c82ba70b2f 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverActionTests.java @@ -165,7 +165,7 @@ public void testCreateUpdateAliasRequest() { assertEquals(sourceAlias, ((AliasAction.Remove) action).getAlias()); foundRemove = true; } else { - throw new AssertionError("Unknow index [" + action.getIndex() + "]"); + throw new AssertionError("Unknown index [" + action.getIndex() + "]"); } } assertTrue(foundAdd); @@ -195,7 +195,7 @@ public void testCreateUpdateAliasRequestWithExplicitWriteIndex() { assertFalse(addAction.writeIndex()); foundRemoveWrite = true; } else { - throw new AssertionError("Unknow index [" + action.getIndex() + "]"); + throw new AssertionError("Unknown index [" + action.getIndex() + "]"); } } assertTrue(foundAddWrite); diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentResponseTests.java index 1d63db7585e65..b11465025b385 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentResponseTests.java +++ 
b/server/src/test/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentResponseTests.java @@ -35,7 +35,7 @@ public class IndicesSegmentResponseTests extends ESTestCase { - public void testToXContentSerialiationWithSortedFields() throws Exception { + public void testToXContentSerializationWithSortedFields() throws Exception { ShardRouting shardRouting = TestShardRouting.newShardRouting("foo", 0, "node_id", true, ShardRoutingState.STARTED); Segment segment = new Segment("my"); diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java index a3618ffa16f9d..eb85be7750bbe 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java @@ -176,7 +176,7 @@ public void onFailure(Exception e) { } }; DiscoveryNode primaryNode = new DiscoveryNode("node_1", buildNewFakeTransportAddress(), Version.CURRENT); - // for the sake of this test we place the replica on the same node. ie. this is not a mistake since we limit per node now + // for the sake of this test we place the replica on the same node. i.e. 
this is not a mistake since we limit per node now DiscoveryNode replicaNode = new DiscoveryNode("node_1", buildNewFakeTransportAddress(), Version.CURRENT); AtomicInteger contextIdGenerator = new AtomicInteger(0); diff --git a/server/src/test/java/org/elasticsearch/action/support/IndicesOptionsTests.java b/server/src/test/java/org/elasticsearch/action/support/IndicesOptionsTests.java index a5427952edcf6..61d56596cc849 100644 --- a/server/src/test/java/org/elasticsearch/action/support/IndicesOptionsTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/IndicesOptionsTests.java @@ -117,10 +117,10 @@ public void testFromOptions() { boolean allowAliasesToMultipleIndices = randomBoolean(); boolean forbidClosedIndices = randomBoolean(); boolean ignoreAliases = randomBoolean(); - boolean ingoreThrottled = randomBoolean(); + boolean ignoreThrottled = randomBoolean(); IndicesOptions indicesOptions = IndicesOptions.fromOptions(ignoreUnavailable, allowNoIndices,expandToOpenIndices, - expandToClosedIndices, allowAliasesToMultipleIndices, forbidClosedIndices, ignoreAliases, ingoreThrottled); + expandToClosedIndices, allowAliasesToMultipleIndices, forbidClosedIndices, ignoreAliases, ignoreThrottled); assertThat(indicesOptions.ignoreUnavailable(), equalTo(ignoreUnavailable)); assertThat(indicesOptions.allowNoIndices(), equalTo(allowNoIndices)); @@ -130,7 +130,7 @@ public void testFromOptions() { assertThat(indicesOptions.allowAliasesToMultipleIndices(), equalTo(allowAliasesToMultipleIndices)); assertThat(indicesOptions.forbidClosedIndices(), equalTo(forbidClosedIndices)); assertEquals(ignoreAliases, indicesOptions.ignoreAliases()); - assertEquals(ingoreThrottled, indicesOptions.ignoreThrottled()); + assertEquals(ignoreThrottled, indicesOptions.ignoreThrottled()); } public void testFromOptionsWithDefaultOptions() { diff --git a/server/src/test/java/org/elasticsearch/action/support/replication/ClusterStateCreationUtils.java 
b/server/src/test/java/org/elasticsearch/action/support/replication/ClusterStateCreationUtils.java index 60053748d68c9..4be2c073ac74d 100644 --- a/server/src/test/java/org/elasticsearch/action/support/replication/ClusterStateCreationUtils.java +++ b/server/src/test/java/org/elasticsearch/action/support/replication/ClusterStateCreationUtils.java @@ -111,9 +111,9 @@ public static ClusterState state(String index, boolean activePrimaryLocal, Shard primaryNode = newNode(0).getId(); unassignedNodes.remove(primaryNode); } else { - Set unassignedNodesExecludingPrimary = new HashSet<>(unassignedNodes); - unassignedNodesExecludingPrimary.remove(newNode(0).getId()); - primaryNode = selectAndRemove(unassignedNodesExecludingPrimary); + Set unassignedNodesExcludingPrimary = new HashSet<>(unassignedNodes); + unassignedNodesExcludingPrimary.remove(newNode(0).getId()); + primaryNode = selectAndRemove(unassignedNodesExcludingPrimary); unassignedNodes.remove(primaryNode); } if (primaryState == ShardRoutingState.RELOCATING) { diff --git a/server/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsIT.java b/server/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsIT.java index 442e27c0867b9..b7be4972f26f0 100644 --- a/server/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsIT.java +++ b/server/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsIT.java @@ -309,7 +309,7 @@ public void testRandomSingleTermVectors() throws IOException { // many shards and do not know how documents are distributed PostingsEnum docsAndPositions = iterator.postings(null, PostingsEnum.ALL); // docs and pos only returns something if positions or - // payloads or offsets are stored / requestd Otherwise use + // payloads or offsets are stored / requested Otherwise use // DocsEnum? 
assertThat(infoString, docsAndPositions.nextDoc(), equalTo(0)); assertThat(infoString, freq[j], equalTo(docsAndPositions.freq())); diff --git a/server/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java b/server/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java index c4fcb9bdb53e2..d1549bbe3a975 100644 --- a/server/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java @@ -134,8 +134,8 @@ public void testFillShardLevelInfo() { } public void testFillDiskUsage() { - ImmutableOpenMap.Builder newLeastAvaiableUsages = ImmutableOpenMap.builder(); - ImmutableOpenMap.Builder newMostAvaiableUsages = ImmutableOpenMap.builder(); + ImmutableOpenMap.Builder newLeastAvailableUsages = ImmutableOpenMap.builder(); + ImmutableOpenMap.Builder newMostAvailableUsages = ImmutableOpenMap.builder(); FsInfo.Path[] node1FSInfo = new FsInfo.Path[] { new FsInfo.Path("/middle", "/dev/sda", 100, 90, 80), new FsInfo.Path("/least", "/dev/sdb", 200, 190, 70), @@ -157,19 +157,19 @@ null,null,null,null,null, new FsInfo(0, null, node2FSInfo), null,null,null,null, new NodeStats(new DiscoveryNode("node_3", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT), 0, null,null,null,null,null, new FsInfo(0, null, node3FSInfo), null,null,null,null,null, null, null) ); - InternalClusterInfoService.fillDiskUsagePerNode(logger, nodeStats, newLeastAvaiableUsages, newMostAvaiableUsages); - DiskUsage leastNode_1 = newLeastAvaiableUsages.get("node_1"); - DiskUsage mostNode_1 = newMostAvaiableUsages.get("node_1"); + InternalClusterInfoService.fillDiskUsagePerNode(logger, nodeStats, newLeastAvailableUsages, newMostAvailableUsages); + DiskUsage leastNode_1 = newLeastAvailableUsages.get("node_1"); + DiskUsage mostNode_1 = newMostAvailableUsages.get("node_1"); assertDiskUsage(mostNode_1, node1FSInfo[2]); assertDiskUsage(leastNode_1, node1FSInfo[1]); - DiskUsage leastNode_2 = 
newLeastAvaiableUsages.get("node_2"); - DiskUsage mostNode_2 = newMostAvaiableUsages.get("node_2"); + DiskUsage leastNode_2 = newLeastAvailableUsages.get("node_2"); + DiskUsage mostNode_2 = newMostAvailableUsages.get("node_2"); assertDiskUsage(leastNode_2, node2FSInfo[0]); assertDiskUsage(mostNode_2, node2FSInfo[0]); - DiskUsage leastNode_3 = newLeastAvaiableUsages.get("node_3"); - DiskUsage mostNode_3 = newMostAvaiableUsages.get("node_3"); + DiskUsage leastNode_3 = newLeastAvailableUsages.get("node_3"); + DiskUsage mostNode_3 = newMostAvailableUsages.get("node_3"); assertDiskUsage(leastNode_3, node3FSInfo[0]); assertDiskUsage(mostNode_3, node3FSInfo[1]); } diff --git a/server/src/test/java/org/elasticsearch/cluster/SimpleClusterStateIT.java b/server/src/test/java/org/elasticsearch/cluster/SimpleClusterStateIT.java index 14919b7e9f0bc..3ddea040f37ad 100644 --- a/server/src/test/java/org/elasticsearch/cluster/SimpleClusterStateIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/SimpleClusterStateIT.java @@ -254,9 +254,9 @@ public void testIndicesOptions() throws Exception { assertThat(clusterStateResponse.getState().metaData().index("fuu").getState(), equalTo(IndexMetaData.State.CLOSE)); // ignore_unavailable set to true should not raise exception on fzzbzz - IndicesOptions ignoreUnavailabe = IndicesOptions.fromOptions(true, true, true, false); + IndicesOptions ignoreUnavailable = IndicesOptions.fromOptions(true, true, true, false); clusterStateResponse = client().admin().cluster().prepareState().clear().setMetaData(true).setIndices("fzzbzz") - .setIndicesOptions(ignoreUnavailabe).get(); + .setIndicesOptions(ignoreUnavailable).get(); assertThat(clusterStateResponse.getState().metaData().indices().isEmpty(), is(true)); // empty wildcard expansion result should work when allowNoIndices is diff --git a/server/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java b/server/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java index 
3d945b7a7bb68..2a0e8c43db6ab 100644 --- a/server/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java @@ -109,7 +109,7 @@ public void testElectOnlyBetweenMasterNodes() throws IOException { .execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(masterNodeName)); logger.info("--> start master node (2)"); - final String nextMasterEligableNodeName = internalCluster().startNode(Settings.builder() + final String nextMasterEligibleNodeName = internalCluster().startNode(Settings.builder() .put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_MASTER_SETTING.getKey(), true)); assertThat(internalCluster().nonMasterClient().admin().cluster().prepareState() .execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(masterNodeName)); @@ -121,9 +121,9 @@ public void testElectOnlyBetweenMasterNodes() throws IOException { logger.info("--> closing master node (1)"); internalCluster().stopCurrentMasterNode(); assertThat(internalCluster().nonMasterClient().admin().cluster().prepareState() - .execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(nextMasterEligableNodeName)); + .execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(nextMasterEligibleNodeName)); assertThat(internalCluster().masterClient().admin().cluster().prepareState() - .execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(nextMasterEligableNodeName)); + .execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(nextMasterEligibleNodeName)); } public void testAliasFilterValidation() throws Exception { diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java index 7a1e63245a679..8ede7edbfa0b4 100644 --- 
a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java @@ -1536,7 +1536,7 @@ AckCollector submitValue(final long value) { AckCollector submitUpdateTask(String source, UnaryOperator clusterStateUpdate) { final AckCollector ackCollector = new AckCollector(); onNode(localNode, () -> { - logger.trace("[{}] submitUpdateTask: enqueueing [{}]", localNode.getId(), source); + logger.trace("[{}] submitUpdateTask: enqueuing [{}]", localNode.getId(), source); final long submittedTerm = coordinator.getCurrentTerm(); masterService.submitStateUpdateTask(source, new ClusterStateUpdateTask() { diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexGraveyardTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexGraveyardTests.java index 1b854d17a619e..bbe3ae124c6e9 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexGraveyardTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexGraveyardTests.java @@ -83,12 +83,12 @@ public void testXContent() throws IOException { public void testAddTombstones() { final IndexGraveyard graveyard1 = createRandom(); - final IndexGraveyard.Builder graveyardBuidler = IndexGraveyard.builder(graveyard1); + final IndexGraveyard.Builder graveyardBuilder = IndexGraveyard.builder(graveyard1); final int numAdds = randomIntBetween(0, 4); for (int j = 0; j < numAdds; j++) { - graveyardBuidler.addTombstone(new Index("nidx-" + j, UUIDs.randomBase64UUID())); + graveyardBuilder.addTombstone(new Index("nidx-" + j, UUIDs.randomBase64UUID())); } - final IndexGraveyard graveyard2 = graveyardBuidler.build(); + final IndexGraveyard graveyard2 = graveyardBuilder.build(); if (numAdds == 0) { assertThat(graveyard2, equalTo(graveyard1)); } else { diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/PrimaryTermsTests.java 
b/server/src/test/java/org/elasticsearch/cluster/routing/PrimaryTermsTests.java index 8a9b00a8d4ff7..a182a9f8a6131 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/PrimaryTermsTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/PrimaryTermsTests.java @@ -221,7 +221,7 @@ public void testPrimaryTermMetaDataSync() { failSomePrimaries(TEST_INDEX_1); assertAllPrimaryTerm(); - // stablize cluster + // stabilize cluster changed = true; while (changed) { changed = startInitializingShards(TEST_INDEX_1); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocateUnassignedDecisionTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocateUnassignedDecisionTests.java index 3fcd743a8a1d4..14ed16f82decc 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocateUnassignedDecisionTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocateUnassignedDecisionTests.java @@ -136,9 +136,9 @@ public void testYesDecision() { } public void testCachedDecisions() { - List cachableStatuses = Arrays.asList(AllocationStatus.DECIDERS_NO, AllocationStatus.DECIDERS_THROTTLED, + List cacheableStatuses = Arrays.asList(AllocationStatus.DECIDERS_NO, AllocationStatus.DECIDERS_THROTTLED, AllocationStatus.NO_VALID_SHARD_COPY, AllocationStatus.FETCHING_SHARD_DATA, AllocationStatus.DELAYED_ALLOCATION); - for (AllocationStatus allocationStatus : cachableStatuses) { + for (AllocationStatus allocationStatus : cacheableStatuses) { if (allocationStatus == AllocationStatus.DECIDERS_THROTTLED) { AllocateUnassignedDecision cached = AllocateUnassignedDecision.throttle(null); AllocateUnassignedDecision another = AllocateUnassignedDecision.throttle(null); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/MaxRetryAllocationDeciderTests.java 
b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/MaxRetryAllocationDeciderTests.java index 2ce0b7b89bec2..4de33cf7ff779 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/MaxRetryAllocationDeciderTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/MaxRetryAllocationDeciderTests.java @@ -223,7 +223,7 @@ public void testFailedAllocation() { routingTable.index("idx").shard(0).shards().get(0))); routingTable = clusterState.routingTable(); - // all counters have been reset to 0 ie. no unassigned info + // all counters have been reset to 0 i.e. no unassigned info assertEquals(routingTable.index("idx").shards().size(), 1); assertNull(routingTable.index("idx").shard(0).shards().get(0).unassignedInfo()); assertEquals(routingTable.index("idx").shard(0).shards().get(0).state(), STARTED); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/RandomAllocationDeciderTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/RandomAllocationDeciderTests.java index fe7c4a89c9fa2..27b53c29d9efe 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/RandomAllocationDeciderTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/RandomAllocationDeciderTests.java @@ -164,7 +164,7 @@ public void testRandomDecisions() { } while (clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size() != 0 || clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size() != 0 && iterations < 200); logger.info("Done Balancing after [{}] iterations. 
State:\n{}", iterations, clusterState); - // we stop after 200 iterations if it didn't stabelize by then something is likely to be wrong + // we stop after 200 iterations if it didn't stabilize by then something is likely to be wrong assertThat("max num iteration exceeded", iterations, Matchers.lessThan(200)); assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(0)); assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(0)); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsLimitAllocationTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsLimitAllocationTests.java index 87339868e4c2c..29a0354ce84db 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsLimitAllocationTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsLimitAllocationTests.java @@ -230,7 +230,7 @@ public void testIndexLevelShardsLimitRemain() { // moving the nodes to node2 since we consider INITIALIZING nodes during rebalance routingNodes = clusterState.getRoutingNodes(); clusterState = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); - // now we are done compared to EvenShardCountAllocator since the Balancer is not soely based on the average + // now we are done compared to EvenShardCountAllocator since the Balancer is not solely based on the average assertThat(clusterState.getRoutingNodes().node("node1").numberOfShardsWithState(STARTED), equalTo(5)); assertThat(clusterState.getRoutingNodes().node("node2").numberOfShardsWithState(STARTED), equalTo(5)); } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardNoReplicasRoutingTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardNoReplicasRoutingTests.java index c674b8c3a292d..273d504b80084 100644 --- 
a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardNoReplicasRoutingTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardNoReplicasRoutingTests.java @@ -127,7 +127,7 @@ public void testSingleIndexStartedShard() { assertThat(clusterState.routingTable().index("test").shard(0).shards().get(0).state(), equalTo(UNASSIGNED)); assertThat(clusterState.routingTable().index("test").shard(0).shards().get(0).currentNodeId(), nullValue()); - logger.info("Bring node1 back, and see it's assinged"); + logger.info("Bring node1 back, and see it's assigned"); clusterState = ClusterState.builder(clusterState) .nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node1"))).build(); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ThrottlingAllocationTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ThrottlingAllocationTests.java index 3c88de4b639ca..15231c6fdce9f 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ThrottlingAllocationTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ThrottlingAllocationTests.java @@ -80,7 +80,7 @@ public void testPrimaryRecoveryThrottling() { .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(10).numberOfReplicas(1)) .build(); - ClusterState clusterState = createRecoveryStateAndInitalizeAllocations(metaData, gatewayAllocator); + ClusterState clusterState = createRecoveryStateAndInitializeAllocations(metaData, gatewayAllocator); logger.info("start one node, do reroute, only 3 should initialize"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build(); @@ -134,7 +134,7 @@ public void testReplicaAndPrimaryRecoveryThrottling() { .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(5).numberOfReplicas(1)) .build(); - ClusterState 
clusterState = createRecoveryStateAndInitalizeAllocations(metaData, gatewayAllocator); + ClusterState clusterState = createRecoveryStateAndInitializeAllocations(metaData, gatewayAllocator); logger.info("with one node, do reroute, only 3 should initialize"); clusterState = strategy.reroute(clusterState, "reroute"); @@ -195,7 +195,7 @@ public void testThrottleIncomingAndOutgoing() { .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(9).numberOfReplicas(0)) .build(); - ClusterState clusterState = createRecoveryStateAndInitalizeAllocations(metaData, gatewayAllocator); + ClusterState clusterState = createRecoveryStateAndInitializeAllocations(metaData, gatewayAllocator); logger.info("with one node, do reroute, only 5 should initialize"); clusterState = strategy.reroute(clusterState, "reroute"); @@ -252,7 +252,7 @@ public void testOutgoingThrottlesAllocation() { .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(2)) .build(); - ClusterState clusterState = createRecoveryStateAndInitalizeAllocations(metaData, gatewayAllocator); + ClusterState clusterState = createRecoveryStateAndInitializeAllocations(metaData, gatewayAllocator); logger.info("with one node, do reroute, only 1 should initialize"); clusterState = strategy.reroute(clusterState, "reroute"); @@ -330,7 +330,7 @@ public void testOutgoingThrottlesAllocation() { assertEquals(clusterState.getRoutingNodes().getOutgoingRecoveries("node2"), 0); } - private ClusterState createRecoveryStateAndInitalizeAllocations(MetaData metaData, TestGatewayAllocator gatewayAllocator) { + private ClusterState createRecoveryStateAndInitializeAllocations(MetaData metaData, TestGatewayAllocator gatewayAllocator) { DiscoveryNode node1 = newNode("node1"); MetaData.Builder metaDataBuilder = new MetaData.Builder(metaData); RoutingTable.Builder routingTableBuilder = RoutingTable.builder(); diff --git 
a/server/src/test/java/org/elasticsearch/common/PriorityTests.java b/server/src/test/java/org/elasticsearch/common/PriorityTests.java index 06bbab6bf58d2..c4c1f4d732217 100644 --- a/server/src/test/java/org/elasticsearch/common/PriorityTests.java +++ b/server/src/test/java/org/elasticsearch/common/PriorityTests.java @@ -81,10 +81,10 @@ public void testCompareTo() { for (Priority p : Priority.values()) { assertEquals(0, p.compareTo(p)); } - List shuffeledAndSorted = Arrays.asList(Priority.values()); - Collections.shuffle(shuffeledAndSorted, random()); - Collections.sort(shuffeledAndSorted); - for (List priorities : Arrays.asList(shuffeledAndSorted, + List shuffledAndSorted = Arrays.asList(Priority.values()); + Collections.shuffle(shuffledAndSorted, random()); + Collections.sort(shuffledAndSorted); + for (List priorities : Arrays.asList(shuffledAndSorted, Arrays.asList(Priority.values()))) { // #values() guarantees order! assertSame(Priority.IMMEDIATE, priorities.get(0)); assertSame(Priority.URGENT, priorities.get(1)); diff --git a/server/src/test/java/org/elasticsearch/common/RoundingTests.java b/server/src/test/java/org/elasticsearch/common/RoundingTests.java index 1664f67a44df9..2331b65776517 100644 --- a/server/src/test/java/org/elasticsearch/common/RoundingTests.java +++ b/server/src/test/java/org/elasticsearch/common/RoundingTests.java @@ -286,7 +286,7 @@ public void testTimeInterval_Kathmandu_DST_Start() { * In this case, when interval crosses DST transition point, rounding in local * time can land in a DST gap which results in wrong UTC rounding values. 
*/ - public void testIntervalRounding_NotDivisibleInteval() { + public void testIntervalRounding_NotDivisibleInterval() { long interval = TimeUnit.MINUTES.toMillis(14); ZoneId tz = ZoneId.of("CET"); Rounding rounding = new Rounding.TimeIntervalRounding(interval, tz); diff --git a/server/src/test/java/org/elasticsearch/common/collect/IteratorsTests.java b/server/src/test/java/org/elasticsearch/common/collect/IteratorsTests.java index f574c8d9fe5f1..e7e00d285505f 100644 --- a/server/src/test/java/org/elasticsearch/common/collect/IteratorsTests.java +++ b/server/src/test/java/org/elasticsearch/common/collect/IteratorsTests.java @@ -29,7 +29,7 @@ import java.util.NoSuchElementException; public class IteratorsTests extends ESTestCase { - public void testConcatentation() { + public void testConcatenation() { List threeTwoOne = Arrays.asList(3, 2, 1); List fourFiveSix = Arrays.asList(4, 5, 6); Iterator concat = Iterators.concat(threeTwoOne.iterator(), fourFiveSix.iterator()); diff --git a/server/src/test/java/org/elasticsearch/common/geo/builders/PolygonBuilderTests.java b/server/src/test/java/org/elasticsearch/common/geo/builders/PolygonBuilderTests.java index 0d4f142785484..4e15a646c0849 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/builders/PolygonBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/common/geo/builders/PolygonBuilderTests.java @@ -48,7 +48,7 @@ static PolygonBuilder mutate(PolygonBuilder original) throws IOException { static PolygonBuilder mutatePolygonBuilder(PolygonBuilder pb) { if (randomBoolean()) { - pb = polyWithOposingOrientation(pb); + pb = polyWithOpposingOrientation(pb); } else { // change either point in shell or in random hole LineStringBuilder lineToChange; @@ -79,7 +79,7 @@ static PolygonBuilder mutatePolygonBuilder(PolygonBuilder pb) { * Takes an input polygon and returns an identical one, only with opposing orientation setting. 
* This is done so we don't have to expose a setter for orientation in the actual class */ - private static PolygonBuilder polyWithOposingOrientation(PolygonBuilder pb) { + private static PolygonBuilder polyWithOpposingOrientation(PolygonBuilder pb) { PolygonBuilder mutation = new PolygonBuilder(pb.shell(), pb.orientation() == Orientation.LEFT ? Orientation.RIGHT : Orientation.LEFT); for (LineStringBuilder hole : pb.holes()) { @@ -91,7 +91,7 @@ private static PolygonBuilder polyWithOposingOrientation(PolygonBuilder pb) { static PolygonBuilder createRandomShape() { PolygonBuilder pgb = (PolygonBuilder) RandomShapeGenerator.createShape(random(), ShapeType.POLYGON); if (randomBoolean()) { - pgb = polyWithOposingOrientation(pgb); + pgb = polyWithOpposingOrientation(pgb); } return pgb; } diff --git a/server/src/test/java/org/elasticsearch/common/io/stream/BytesStreamsTests.java b/server/src/test/java/org/elasticsearch/common/io/stream/BytesStreamsTests.java index adfe90755dd03..953a78886f61c 100644 --- a/server/src/test/java/org/elasticsearch/common/io/stream/BytesStreamsTests.java +++ b/server/src/test/java/org/elasticsearch/common/io/stream/BytesStreamsTests.java @@ -170,7 +170,7 @@ public void testSingleFullPageBulkWriteWithOffsetCrossover() throws Exception { // now write the rest - more than fits into the remaining page + a full page after // that, - // ie. we cross over into a third + // i.e. 
we cross over into a third out.writeBytes(expectedData, initialOffset, additionalLength); assertEquals(expectedData.length, out.size()); assertArrayEquals(expectedData, BytesReference.toBytes(out.bytes())); diff --git a/server/src/test/java/org/elasticsearch/common/rounding/TimeZoneRoundingTests.java b/server/src/test/java/org/elasticsearch/common/rounding/TimeZoneRoundingTests.java index e49f25772a726..f03b3b7e884ab 100644 --- a/server/src/test/java/org/elasticsearch/common/rounding/TimeZoneRoundingTests.java +++ b/server/src/test/java/org/elasticsearch/common/rounding/TimeZoneRoundingTests.java @@ -278,7 +278,7 @@ public void testTimeInterval_Kathmandu_DST_Start() { * In this case, when interval crosses DST transition point, rounding in local * time can land in a DST gap which results in wrong UTC rounding values. */ - public void testIntervalRounding_NotDivisibleInteval() { + public void testIntervalRounding_NotDivisibleInterval() { DateTimeZone tz = DateTimeZone.forID("CET"); long interval = TimeUnit.MINUTES.toMillis(14); Rounding rounding = new Rounding.TimeIntervalRounding(interval, tz); diff --git a/server/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java b/server/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java index 9194a60382d0d..226391663ffbb 100644 --- a/server/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java +++ b/server/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java @@ -975,7 +975,7 @@ public void testUpdateNumberOfShardsFail() { () -> settings.updateSettings(Settings.builder().put("index.number_of_shards", 8).build(), Settings.builder(), Settings.builder(), "index")); assertThat(ex.getMessage(), - containsString("final index setting [index.number_of_shards], not updateable")); + containsString("final index setting [index.number_of_shards], not updatable")); } public void testFinalSettingUpdateFail() { diff --git 
a/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java b/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java index 750c7148946fc..f6489a225aa80 100644 --- a/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java +++ b/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java @@ -277,14 +277,14 @@ public void testDefault() { // It gets more complicated when there are two settings objects.... Settings hasFallback = Settings.builder().put("foo.bar", "o").build(); - Setting fallsback = + Setting fallsBack = new Setting<>("foo.baz", secondaryDefault, Function.identity(), Property.NodeScope); - assertEquals("o", fallsback.get(hasFallback)); - assertEquals("some_default", fallsback.get(Settings.EMPTY)); - assertEquals("some_default", fallsback.get(Settings.EMPTY, Settings.EMPTY)); - assertEquals("o", fallsback.get(Settings.EMPTY, hasFallback)); - assertEquals("o", fallsback.get(hasFallback, Settings.EMPTY)); - assertEquals("a", fallsback.get( + assertEquals("o", fallsBack.get(hasFallback)); + assertEquals("some_default", fallsBack.get(Settings.EMPTY)); + assertEquals("some_default", fallsBack.get(Settings.EMPTY, Settings.EMPTY)); + assertEquals("o", fallsBack.get(Settings.EMPTY, hasFallback)); + assertEquals("o", fallsBack.get(hasFallback, Settings.EMPTY)); + assertEquals("a", fallsBack.get( Settings.builder().put("foo.bar", "a").build(), Settings.builder().put("foo.bar", "b").build())); } diff --git a/server/src/test/java/org/elasticsearch/common/util/BytesRefHashTests.java b/server/src/test/java/org/elasticsearch/common/util/BytesRefHashTests.java index 5370815926c68..1860a952e0375 100644 --- a/server/src/test/java/org/elasticsearch/common/util/BytesRefHashTests.java +++ b/server/src/test/java/org/elasticsearch/common/util/BytesRefHashTests.java @@ -59,7 +59,7 @@ public void setUp() throws Exception { newHash(); } - public void testDuell() { + public void testDuel() { final int len = 
randomIntBetween(1, 100000); final BytesRef[] values = new BytesRef[len]; for (int i = 0; i < values.length; ++i) { diff --git a/server/src/test/java/org/elasticsearch/common/util/CancellableThreadsTests.java b/server/src/test/java/org/elasticsearch/common/util/CancellableThreadsTests.java index 729c431d2b2cf..2b937730e4750 100644 --- a/server/src/test/java/org/elasticsearch/common/util/CancellableThreadsTests.java +++ b/server/src/test/java/org/elasticsearch/common/util/CancellableThreadsTests.java @@ -18,8 +18,8 @@ */ package org.elasticsearch.common.util; -import org.elasticsearch.common.util.CancellableThreads.IOInterruptable; -import org.elasticsearch.common.util.CancellableThreads.Interruptable; +import org.elasticsearch.common.util.CancellableThreads.IOInterruptible; +import org.elasticsearch.common.util.CancellableThreads.Interruptible; import org.elasticsearch.test.ESTestCase; import org.hamcrest.Matchers; @@ -62,7 +62,7 @@ private TestPlan(int id) { } } - static class TestRunnable implements Interruptable { + static class TestRunnable implements Interruptible { final TestPlan plan; final CountDownLatch readyForCancel; @@ -95,7 +95,7 @@ public void run() throws InterruptedException { } } - static class TestIORunnable implements IOInterruptable { + static class TestIORunnable implements IOInterruptible { final TestPlan plan; final CountDownLatch readyForCancel; diff --git a/server/src/test/java/org/elasticsearch/common/util/LongHashTests.java b/server/src/test/java/org/elasticsearch/common/util/LongHashTests.java index 708e7beb861ec..871a13cf0c135 100644 --- a/server/src/test/java/org/elasticsearch/common/util/LongHashTests.java +++ b/server/src/test/java/org/elasticsearch/common/util/LongHashTests.java @@ -55,7 +55,7 @@ public void setUp() throws Exception { newHash(); } - public void testDuell() { + public void testDuel() { final Long[] values = new Long[randomIntBetween(1, 100000)]; for (int i = 0; i < values.length; ++i) { values[i] = randomLong(); diff 
--git a/server/src/test/java/org/elasticsearch/common/util/concurrent/AbstractRunnableTests.java b/server/src/test/java/org/elasticsearch/common/util/concurrent/AbstractRunnableTests.java index 2373b30e1b2ed..c086ab8b5885b 100644 --- a/server/src/test/java/org/elasticsearch/common/util/concurrent/AbstractRunnableTests.java +++ b/server/src/test/java/org/elasticsearch/common/util/concurrent/AbstractRunnableTests.java @@ -163,7 +163,7 @@ protected void doRun() throws Exception { runnable.onRejection(exception); } - public void testIsForceExecutuonDefaultsFalse() { + public void testIsForceExecutionDefaultsFalse() { AbstractRunnable runnable = new AbstractRunnable() { @Override public void onFailure(Exception e) { diff --git a/server/src/test/java/org/elasticsearch/common/util/concurrent/ThreadContextTests.java b/server/src/test/java/org/elasticsearch/common/util/concurrent/ThreadContextTests.java index 9729aca294184..767f8a4537b16 100644 --- a/server/src/test/java/org/elasticsearch/common/util/concurrent/ThreadContextTests.java +++ b/server/src/test/java/org/elasticsearch/common/util/concurrent/ThreadContextTests.java @@ -361,12 +361,12 @@ public void testSerializeInDifferentContextNoDefaults() throws IOException { } { Settings otherSettings = Settings.builder().put("request.headers.default", "5").build(); - ThreadContext otherhreadContext = new ThreadContext(otherSettings); - otherhreadContext.readHeaders(out.bytes().streamInput()); + ThreadContext otherThreadContext = new ThreadContext(otherSettings); + otherThreadContext.readHeaders(out.bytes().streamInput()); - assertEquals("bar", otherhreadContext.getHeader("foo")); - assertNull(otherhreadContext.getTransient("ctx.foo")); - assertEquals("5", otherhreadContext.getHeader("default")); + assertEquals("bar", otherThreadContext.getHeader("foo")); + assertNull(otherThreadContext.getTransient("ctx.foo")); + assertEquals("5", otherThreadContext.getHeader("default")); } } diff --git 
a/server/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java b/server/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java index cd1a209878346..6ee2d3f89d49f 100644 --- a/server/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java +++ b/server/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java @@ -971,7 +971,7 @@ void doTestRawValue(XContent source) throws Exception { protected void doTestBigInteger(JsonGenerator generator, ByteArrayOutputStream os) throws Exception { // Big integers cannot be handled explicitly, but if some values happen to be big ints, - // we can still call parser.map() and get the bigint value so that eg. source filtering + // we can still call parser.map() and get the bigint value so that e.g. source filtering // keeps working BigInteger bigInteger = BigInteger.valueOf(Long.MAX_VALUE).add(BigInteger.ONE); generator.writeStartObject(); diff --git a/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java b/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java index a8ce5830106d8..e782098a36bdb 100644 --- a/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java +++ b/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java @@ -357,7 +357,7 @@ public void onFailure(Exception e) { public void testIndexImportedFromDataOnlyNodesIfMasterLostDataFolder() throws Exception { // test for https://github.com/elastic/elasticsearch/issues/8823 - Settings zen1Settings = Settings.builder().put(TestZenDiscovery.USE_ZEN2.getKey(), false).build(); // TODO: needs adaptions for Zen2 + Settings zen1Settings = Settings.builder().put(TestZenDiscovery.USE_ZEN2.getKey(), false).build(); // TODO: needs adaptations for Zen2 String masterNode = internalCluster().startMasterOnlyNode(zen1Settings); internalCluster().startDataOnlyNode(zen1Settings); ensureStableCluster(2); diff --git 
a/server/src/test/java/org/elasticsearch/discovery/DiscoveryDisruptionIT.java b/server/src/test/java/org/elasticsearch/discovery/DiscoveryDisruptionIT.java index 6f47590089f28..a2e0577286e39 100644 --- a/server/src/test/java/org/elasticsearch/discovery/DiscoveryDisruptionIT.java +++ b/server/src/test/java/org/elasticsearch/discovery/DiscoveryDisruptionIT.java @@ -137,13 +137,13 @@ public void testClusterJoinDespiteOfPublishingIssues() throws Exception { DiscoveryNodes discoveryNodes = internalCluster().getInstance(ClusterService.class, nonMasterNode).state().nodes(); - TransportService masterTranspotService = + TransportService masterTransportService = internalCluster().getInstance(TransportService.class, discoveryNodes.getMasterNode().getName()); logger.info("blocking requests from non master [{}] to master [{}]", nonMasterNode, masterNode); MockTransportService nonMasterTransportService = (MockTransportService) internalCluster().getInstance(TransportService.class, nonMasterNode); - nonMasterTransportService.addFailToSendNoConnectRule(masterTranspotService); + nonMasterTransportService.addFailToSendNoConnectRule(masterTransportService); assertNoMaster(nonMasterNode); diff --git a/server/src/test/java/org/elasticsearch/env/EnvironmentTests.java b/server/src/test/java/org/elasticsearch/env/EnvironmentTests.java index c87a896d318be..07a6fbd900b54 100644 --- a/server/src/test/java/org/elasticsearch/env/EnvironmentTests.java +++ b/server/src/test/java/org/elasticsearch/env/EnvironmentTests.java @@ -62,7 +62,7 @@ public void testRepositoryResolution() throws IOException { assertThat(environment.resolveRepoFile("/another/repos/repo1"), notNullValue()); assertThat(environment.resolveRepoFile("/test/repos/../repo1"), nullValue()); assertThat(environment.resolveRepoFile("/test/repos/../repos/repo1"), notNullValue()); - assertThat(environment.resolveRepoFile("/somethingeles/repos/repo1"), nullValue()); + assertThat(environment.resolveRepoFile("/somethingelse/repos/repo1"), 
nullValue()); assertThat(environment.resolveRepoFile("/test/other/repo"), notNullValue()); diff --git a/server/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java b/server/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java index 4750540a178d0..f07b30fcb4d39 100644 --- a/server/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java +++ b/server/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java @@ -434,9 +434,9 @@ public void testReuseInFileBasedPeerRecovery() throws Exception { final String replicaNode = internalCluster().startDataOnlyNode(nodeSettings(1)); ensureGreen(); - final RecoveryResponse initialRecoveryReponse = client().admin().indices().prepareRecoveries("test").get(); + final RecoveryResponse initialRecoveryResponse = client().admin().indices().prepareRecoveries("test").get(); final Set files = new HashSet<>(); - for (final RecoveryState recoveryState : initialRecoveryReponse.shardRecoveryStates().get("test")) { + for (final RecoveryState recoveryState : initialRecoveryResponse.shardRecoveryStates().get("test")) { if (recoveryState.getTargetNode().getName().equals(replicaNode)) { for (final RecoveryState.File file : recoveryState.getIndex().fileDetails()) { files.add(file.name()); diff --git a/server/src/test/java/org/elasticsearch/index/IndexSettingsTests.java b/server/src/test/java/org/elasticsearch/index/IndexSettingsTests.java index 64a2fa69bcbd5..5d2ca4580870e 100644 --- a/server/src/test/java/org/elasticsearch/index/IndexSettingsTests.java +++ b/server/src/test/java/org/elasticsearch/index/IndexSettingsTests.java @@ -559,6 +559,6 @@ public void testUpdateSoftDeletesFails() { IllegalArgumentException error = expectThrows(IllegalArgumentException.class, () -> settings.updateSettings(Settings.builder().put("index.soft_deletes.enabled", randomBoolean()).build(), Settings.builder(), Settings.builder(), "index")); - assertThat(error.getMessage(), equalTo("final index setting 
[index.soft_deletes.enabled], not updateable")); + assertThat(error.getMessage(), equalTo("final index setting [index.soft_deletes.enabled], not updatable")); } } diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index 31995f1f7f252..c16b395e6fee3 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -4936,7 +4936,7 @@ public void testStressUpdateSameDocWhileGettingIt() throws IOException, Interrup engine.index(doc); engine.delete(new Engine.Delete(doc.type(), doc.id(), doc.uid(), primaryTerm.get())); - // now index more append only docs and refresh so we re-enabel the optimization for unsafe version map + // now index more append only docs and refresh so we re-enable the optimization for unsafe version map ParsedDocument document1 = testParsedDocument(Integer.toString(1), null, testDocumentWithTextField(), SOURCE, null); engine.index(new Engine.Index(newUid(document1), document1, UNASSIGNED_SEQ_NO, 0, Versions.MATCH_ANY, VersionType.INTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), 0, false, UNASSIGNED_SEQ_NO, 0)); diff --git a/server/src/test/java/org/elasticsearch/index/engine/LiveVersionMapTests.java b/server/src/test/java/org/elasticsearch/index/engine/LiveVersionMapTests.java index 115785b2e7b96..c26f1d8d2e20c 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/LiveVersionMapTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/LiveVersionMapTests.java @@ -257,7 +257,7 @@ public void testConcurrently() throws IOException, InterruptedException { deletes.entrySet().forEach(e -> { try (Releasable r = map.acquireLock(e.getKey())) { VersionValue value = map.getUnderLock(e.getKey()); - // here we keep track of the deletes and ensure that all deletes that are not visible anymore ie. 
not in the map + // here we keep track of the deletes and ensure that all deletes that are not visible anymore i.e. not in the map // have a timestamp that is smaller or equal to the maximum timestamp that we pruned on final DeleteVersionValue delete = e.getValue(); if (value == null) { @@ -280,7 +280,7 @@ public void testCarryOnSafeAccess() throws IOException { assertTrue(map.isSafeAccessRequired()); assertFalse(map.isUnsafe()); int numIters = randomIntBetween(1, 5); - for (int i = 0; i < numIters; i++) { // if we don't do anything ie. no adds etc we will stay with the safe access required + for (int i = 0; i < numIters; i++) { // if we don't do anything i.e. no adds etc we will stay with the safe access required map.beforeRefresh(); map.afterRefresh(randomBoolean()); assertTrue("failed in iter: " + i, map.isSafeAccessRequired()); diff --git a/server/src/test/java/org/elasticsearch/index/fielddata/NoOrdinalsStringFieldDataTests.java b/server/src/test/java/org/elasticsearch/index/fielddata/NoOrdinalsStringFieldDataTests.java index 33170eb39ece1..3232418d278a2 100644 --- a/server/src/test/java/org/elasticsearch/index/fielddata/NoOrdinalsStringFieldDataTests.java +++ b/server/src/test/java/org/elasticsearch/index/fielddata/NoOrdinalsStringFieldDataTests.java @@ -28,7 +28,7 @@ import org.elasticsearch.search.MultiValueMode; /** Returns an implementation based on paged bytes which doesn't implement WithOrdinals in order to visit different paths in the code, - * eg. BytesRefFieldComparatorSource makes decisions based on whether the field data implements WithOrdinals. */ + * e.g. BytesRefFieldComparatorSource makes decisions based on whether the field data implements WithOrdinals. 
*/ public class NoOrdinalsStringFieldDataTests extends PagedBytesStringFieldDataTests { public static IndexFieldData hideOrdinals(final IndexFieldData in) { diff --git a/server/src/test/java/org/elasticsearch/index/mapper/FieldTypeLookupTests.java b/server/src/test/java/org/elasticsearch/index/mapper/FieldTypeLookupTests.java index 6e27823f8a0c0..6355fe04cfc01 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/FieldTypeLookupTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/FieldTypeLookupTests.java @@ -104,8 +104,8 @@ public void testConflictingFieldTypes() { ft2.setName("foo"); ft2.setBoost(2.0f); FieldMapper f2 = new MockFieldMapper("foo", ft2); - lookup.copyAndAddAll("type", newList(f2), emptyList()); // boost is updateable, so ok since we are implicitly updating all types - lookup.copyAndAddAll("type2", newList(f2), emptyList()); // boost is updateable, so ok if forcing + lookup.copyAndAddAll("type", newList(f2), emptyList()); // boost is updatable, so ok since we are implicitly updating all types + lookup.copyAndAddAll("type2", newList(f2), emptyList()); // boost is updatable, so ok if forcing // now with a non changeable setting MappedFieldType ft3 = new MockFieldMapper.FakeFieldType(); ft3.setName("foo"); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldTypeTests.java index eae5b4ac7d2ab..f8c4df34fa6d8 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldTypeTests.java @@ -73,7 +73,7 @@ protected MappedFieldType createDefaultFieldType() { public void testIsFieldWithinQuery() throws IOException { KeywordFieldType ft = new KeywordFieldType(); - // current impl ignores args and shourd always return INTERSECTS + // current impl ignores args and should always return INTERSECTS assertEquals(Relation.INTERSECTS, 
ft.isFieldWithinQuery(null, RandomStrings.randomAsciiOfLengthBetween(random(), 0, 5), RandomStrings.randomAsciiOfLengthBetween(random(), 0, 5), diff --git a/server/src/test/java/org/elasticsearch/index/mapper/MultiFieldCopyToMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/MultiFieldCopyToMapperTests.java index 6ecf4b6408be3..8b1f3124c7412 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/MultiFieldCopyToMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/MultiFieldCopyToMapperTests.java @@ -35,7 +35,7 @@ public class MultiFieldCopyToMapperTests extends ESTestCase { public void testExceptionForCopyToInMultiFields() throws IOException { - XContentBuilder mapping = createMappinmgWithCopyToInMultiField(); + XContentBuilder mapping = createMappingWithCopyToInMultiField(); // first check that for newer versions we throw exception if copy_to is found within multi field MapperService mapperService = MapperTestUtils.newMapperService(xContentRegistry(), createTempDir(), Settings.EMPTY, "test"); @@ -48,7 +48,7 @@ public void testExceptionForCopyToInMultiFields() throws IOException { } } - private static XContentBuilder createMappinmgWithCopyToInMultiField() throws IOException { + private static XContentBuilder createMappingWithCopyToInMultiField() throws IOException { XContentBuilder mapping = jsonBuilder(); mapping.startObject() .startObject("type") diff --git a/server/src/test/java/org/elasticsearch/index/mapper/SourceFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/SourceFieldMapperTests.java index a8db41e677b95..b6ac750370079 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/SourceFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/SourceFieldMapperTests.java @@ -137,7 +137,7 @@ private void assertConflicts(String mapping1, String mapping2, DocumentMapperPar } } - public void testEnabledNotUpdateable() throws Exception { + public void 
testEnabledNotUpdatable() throws Exception { DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser(); // using default of true String mapping1 = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject()); @@ -153,7 +153,7 @@ public void testEnabledNotUpdateable() throws Exception { assertConflicts(mapping1, mapping3, parser); } - public void testIncludesNotUpdateable() throws Exception { + public void testIncludesNotUpdatable() throws Exception { DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser(); String defaultMapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject()); String mapping1 = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") @@ -171,7 +171,7 @@ public void testIncludesNotUpdateable() throws Exception { assertConflicts(mapping1, mapping1, parser); } - public void testExcludesNotUpdateable() throws Exception { + public void testExcludesNotUpdatable() throws Exception { DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser(); String defaultMapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject()); String mapping1 = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") diff --git a/server/src/test/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilderTests.java index 74553c3be1f79..fedb28fd37c03 100644 --- a/server/src/test/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilderTests.java @@ -232,8 +232,8 @@ public abstract class PointTester { private double brokenCoordinate = randomFrom(brokenDoubles); private double invalidCoordinate; - public 
PointTester(double invalidCoodinate) { - this.invalidCoordinate = invalidCoodinate; + public PointTester(double invalidCoordinate) { + this.invalidCoordinate = invalidCoordinate; } public void invalidateCoordinate(GeoBoundingBoxQueryBuilder qb, boolean useBrokenDouble) { if (useBrokenDouble) { diff --git a/server/src/test/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilderTests.java index 5615944219c91..d6a45a165d19b 100644 --- a/server/src/test/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilderTests.java @@ -334,7 +334,7 @@ public void testItemFromXContent() throws IOException { } @Override - protected boolean isCachable(MoreLikeThisQueryBuilder queryBuilder) { + protected boolean isCacheable(MoreLikeThisQueryBuilder queryBuilder) { return queryBuilder.likeItems().length == 0; // items are always fetched } diff --git a/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java index 70f504516ec8a..01da0e22b9b20 100644 --- a/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java @@ -535,7 +535,7 @@ public void testToQueryWildcardQuery() throws Exception { } } - public void testToQueryWilcardQueryWithSynonyms() throws Exception { + public void testToQueryWildcardQueryWithSynonyms() throws Exception { for (Operator op : Operator.values()) { BooleanClause.Occur defaultOp = op.toBooleanClauseOccur(); QueryStringQueryParser queryParser = new QueryStringQueryParser(createShardContext(), STRING_FIELD_NAME); @@ -747,7 +747,7 @@ public void testEnabledPositionIncrements() throws Exception { assertFalse(queryBuilder.enablePositionIncrements()); } - 
public void testToQueryFuzzyQueryAutoFuziness() throws Exception { + public void testToQueryFuzzyQueryAutoFuzziness() throws Exception { int length = randomIntBetween(1, 10); StringBuilder queryString = new StringBuilder(); for (int i = 0; i < length; i++) { diff --git a/server/src/test/java/org/elasticsearch/index/query/ScriptQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/ScriptQueryBuilderTests.java index 0252468e717dc..b0bbca3266bab 100644 --- a/server/src/test/java/org/elasticsearch/index/query/ScriptQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/ScriptQueryBuilderTests.java @@ -117,7 +117,7 @@ protected Set getObjectsHoldingArbitraryContent() { } @Override - protected boolean isCachable(ScriptQueryBuilder queryBuilder) { + protected boolean isCacheable(ScriptQueryBuilder queryBuilder) { return false; } } diff --git a/server/src/test/java/org/elasticsearch/index/query/ScriptScoreQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/ScriptScoreQueryBuilderTests.java index ef173883d0ac0..ad9af8c49c391 100644 --- a/server/src/test/java/org/elasticsearch/index/query/ScriptScoreQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/ScriptScoreQueryBuilderTests.java @@ -89,7 +89,7 @@ public void testIllegalArguments() { } @Override - protected boolean isCachable(ScriptScoreQueryBuilder queryBuilder) { + protected boolean isCacheable(ScriptScoreQueryBuilder queryBuilder) { return false; } } diff --git a/server/src/test/java/org/elasticsearch/index/query/TermsQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/TermsQueryBuilderTests.java index 02df22fd97efb..2ab32cd2fbd59 100644 --- a/server/src/test/java/org/elasticsearch/index/query/TermsQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/TermsQueryBuilderTests.java @@ -140,14 +140,14 @@ protected void doAssertLuceneQuery(TermsQueryBuilder queryBuilder, Query 
query, } } - public void testEmtpyFieldName() { + public void testEmptyFieldName() { IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new TermsQueryBuilder(null, "term")); assertEquals("field name cannot be null.", e.getMessage()); e = expectThrows(IllegalArgumentException.class, () -> new TermsQueryBuilder("", "term")); assertEquals("field name cannot be null.", e.getMessage()); } - public void testEmtpyTermsLookup() { + public void testEmptyTermsLookup() { IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new TermsQueryBuilder("field", (TermsLookup) null)); assertEquals("No value or termsLookup specified for terms query", e.getMessage()); } @@ -274,10 +274,10 @@ public void testGeo() throws Exception { } @Override - protected boolean isCachable(TermsQueryBuilder queryBuilder) { - // even though we use a terms lookup here we do this during rewrite and that means we are cachable on toQuery + protected boolean isCacheable(TermsQueryBuilder queryBuilder) { + // even though we use a terms lookup here we do this during rewrite and that means we are cacheable on toQuery // that's why we return true here all the time - return super.isCachable(queryBuilder); + return super.isCacheable(queryBuilder); } public void testSerializationFailsUnlessFetched() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/index/query/TermsSetQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/TermsSetQueryBuilderTests.java index 698cb71692b0f..f68769bb89cb5 100644 --- a/server/src/test/java/org/elasticsearch/index/query/TermsSetQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/TermsSetQueryBuilderTests.java @@ -111,7 +111,7 @@ protected void doAssertLuceneQuery(TermsSetQueryBuilder queryBuilder, Query quer } @Override - protected boolean isCachable(TermsSetQueryBuilder queryBuilder) { + protected boolean isCacheable(TermsSetQueryBuilder queryBuilder) { return 
queryBuilder.getMinimumShouldMatchField() != null || (queryBuilder.getMinimumShouldMatchScript() != null && queryBuilder.getValues().isEmpty()); } diff --git a/server/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryBuilderTests.java index ba673cf2ea4a9..8f177cac863b3 100644 --- a/server/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryBuilderTests.java @@ -797,7 +797,7 @@ public List> getScoreFunctions() { } @Override - protected boolean isCachable(FunctionScoreQueryBuilder queryBuilder) { + protected boolean isCacheable(FunctionScoreQueryBuilder queryBuilder) { FilterFunctionBuilder[] filterFunctionBuilders = queryBuilder.filterFunctionBuilders(); for (FilterFunctionBuilder builder : filterFunctionBuilders) { if (builder.getScoreFunction() instanceof ScriptScoreFunctionBuilder) { diff --git a/server/src/test/java/org/elasticsearch/index/reindex/BulkByScrollResponseTests.java b/server/src/test/java/org/elasticsearch/index/reindex/BulkByScrollResponseTests.java index 71aab8ca9f9f6..bde74e7aa61b5 100644 --- a/server/src/test/java/org/elasticsearch/index/reindex/BulkByScrollResponseTests.java +++ b/server/src/test/java/org/elasticsearch/index/reindex/BulkByScrollResponseTests.java @@ -45,7 +45,7 @@ public class BulkByScrollResponseTests extends AbstractXContentTestCase { - public void testDeleteteByQueryRequestImplementsIndicesRequestReplaceable() { + public void testDeleteByQueryRequestImplementsIndicesRequestReplaceable() { int numIndices = between(1, 100); String[] indices = new String[numIndices]; for (int i = 0; i < numIndices; i++) { diff --git a/server/src/test/java/org/elasticsearch/index/reindex/LeaderBulkByScrollTaskStateTests.java 
b/server/src/test/java/org/elasticsearch/index/reindex/LeaderBulkByScrollTaskStateTests.java index 16d9df8c820ee..a90e941f1d3f1 100644 --- a/server/src/test/java/org/elasticsearch/index/reindex/LeaderBulkByScrollTaskStateTests.java +++ b/server/src/test/java/org/elasticsearch/index/reindex/LeaderBulkByScrollTaskStateTests.java @@ -132,7 +132,7 @@ public void onResponse(T response) { @Override public void onFailure(Exception e) { - throw new RuntimeException("Expected no interations but was received a failure", e); + throw new RuntimeException("Expected no interactions but was received a failure", e); } }; } diff --git a/server/src/test/java/org/elasticsearch/index/seqno/LocalCheckpointTrackerTests.java b/server/src/test/java/org/elasticsearch/index/seqno/LocalCheckpointTrackerTests.java index 44b3794ea6d42..6b38e0b5b4d4f 100644 --- a/server/src/test/java/org/elasticsearch/index/seqno/LocalCheckpointTrackerTests.java +++ b/server/src/test/java/org/elasticsearch/index/seqno/LocalCheckpointTrackerTests.java @@ -232,7 +232,7 @@ public void testWaitForOpsToComplete() throws BrokenBarrierException, Interrupte final AtomicBoolean complete = new AtomicBoolean(); final Thread thread = new Thread(() -> { try { - // sychronize starting with the test thread + // synchronize starting with the test thread barrier.await(); tracker.waitForOpsToComplete(seqNo); complete.set(true); diff --git a/server/src/test/java/org/elasticsearch/index/seqno/ReplicationTrackerTests.java b/server/src/test/java/org/elasticsearch/index/seqno/ReplicationTrackerTests.java index 0aed64d05fc93..8efc1c9423eb9 100644 --- a/server/src/test/java/org/elasticsearch/index/seqno/ReplicationTrackerTests.java +++ b/server/src/test/java/org/elasticsearch/index/seqno/ReplicationTrackerTests.java @@ -577,11 +577,11 @@ public void testUpdateAllocationIdsFromMaster() throws Exception { final long minimumActiveLocalCheckpoint = (long) activeLocalCheckpoints.values().stream().min(Integer::compareTo).get(); 
assertThat(tracker.getGlobalCheckpoint(), equalTo(minimumActiveLocalCheckpoint)); assertThat(updatedGlobalCheckpoint.get(), equalTo(minimumActiveLocalCheckpoint)); - final long minimumInitailizingLocalCheckpoint = (long) initializingLocalCheckpoints.values().stream().min(Integer::compareTo).get(); + final long minimumInitializingLocalCheckpoint = (long) initializingLocalCheckpoints.values().stream().min(Integer::compareTo).get(); // now we are going to add a new allocation ID and bring it in sync which should move it to the in-sync allocation IDs final long localCheckpoint = - randomIntBetween(0, Math.toIntExact(Math.min(minimumActiveLocalCheckpoint, minimumInitailizingLocalCheckpoint) - 1)); + randomIntBetween(0, Math.toIntExact(Math.min(minimumActiveLocalCheckpoint, minimumInitializingLocalCheckpoint) - 1)); // using a different length than we have been using above ensures that we can not collide with a previous allocation ID final AllocationId newSyncingAllocationId = AllocationId.newInitializing(); diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java index 475caf06e30a8..b85f9c368f0b5 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java @@ -187,7 +187,7 @@ public void testDurableFlagHasEffect() throws Exception { // check if we are synced upto the current write location Translog.Location lastWriteLocation = tlog.getLastWriteLocation(); try { - // the lastWriteLocaltion has a Integer.MAX_VALUE size so we have to create a new one + // the lastWriteLocation has a Integer.MAX_VALUE size so we have to create a new one return tlog.ensureSynced(new Translog.Location(lastWriteLocation.generation, lastWriteLocation.translogLocation, 0)); } catch (IOException e) { throw new UncheckedIOException(e); @@ -773,16 +773,16 @@ public void testGlobalCheckpointListeners() throws 
Exception { client().prepareIndex("test", "_doc", Integer.toString(i)).setSource("{}", XContentType.JSON).get(); assertBusy(() -> assertThat(globalCheckpoint.get(), equalTo((long) index))); // adding a listener expecting a lower global checkpoint should fire immediately - final AtomicLong immediateGlobalCheckpint = new AtomicLong(); + final AtomicLong immediateGlobalCheckpoint = new AtomicLong(); shard.addGlobalCheckpointListener( randomLongBetween(0, i), (g, e) -> { assertThat(g, greaterThanOrEqualTo(NO_OPS_PERFORMED)); assertNull(e); - immediateGlobalCheckpint.set(g); + immediateGlobalCheckpoint.set(g); }, null); - assertBusy(() -> assertThat(immediateGlobalCheckpint.get(), equalTo((long) index))); + assertBusy(() -> assertThat(immediateGlobalCheckpoint.get(), equalTo((long) index))); } final AtomicBoolean invoked = new AtomicBoolean(); shard.addGlobalCheckpointListener( diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index 4745904a55467..0573407409a94 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -2591,7 +2591,7 @@ public void testRecoverFromLocalShard() throws IOException { targetShard.routingEntry().allocationId().getId()).getLocalCheckpoint(), equalTo(1L)); assertDocCount(targetShard, 2); } - // now check that it's persistent ie. that the added shards are committed + // now check that it's persistent i.e. 
that the added shards are committed { final IndexShard newShard = reinitShard(targetShard); recoverShardFromStore(newShard); @@ -2775,7 +2775,7 @@ public void testEstimateTotalDocSize() throws Exception { } /** - * here we are simulating the scenario that happens when we do async shard fetching from GatewaySerivce while we are finishing + * here we are simulating the scenario that happens when we do async shard fetching from GatewayService while we are finishing * a recovery and concurrently clean files. This should always be possible without any exception. Yet there was a bug where IndexShard * acquired the index writer lock before it called into the store that has it's own locking for metadata reads */ diff --git a/server/src/test/java/org/elasticsearch/index/shard/ShardPathTests.java b/server/src/test/java/org/elasticsearch/index/shard/ShardPathTests.java index cbaae21476855..c5eccb9529f81 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/ShardPathTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/ShardPathTests.java @@ -72,7 +72,7 @@ public void testFailLoadShardPathOnMultiState() throws IOException { } } - public void testFailLoadShardPathIndexUUIDMissmatch() throws IOException { + public void testFailLoadShardPathIndexUUIDMismatch() throws IOException { try (NodeEnvironment env = newNodeEnvironment(Settings.builder().build())) { Settings.Builder builder = Settings.builder().put(IndexMetaData.SETTING_INDEX_UUID, "foobar") .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT); diff --git a/server/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java b/server/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java index 966495faa1e13..bb2de8eb0a05f 100644 --- a/server/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java +++ b/server/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java @@ -176,7 +176,7 @@ public void testCorruptFileAndRecover() throws ExecutionException, 
InterruptedEx client().admin().indices().prepareUpdateSettings("test").setSettings(build).get(); ClusterHealthResponse health = client().admin().cluster() .health(Requests.clusterHealthRequest("test").waitForGreenStatus() - .timeout("5m") // sometimes due to cluster rebalacing and random settings default timeout is just not enough. + .timeout("5m") // sometimes due to cluster rebalancing and random settings default timeout is just not enough. .waitForNoRelocatingShards(true)).actionGet(); if (health.isTimedOut()) { logger.info("cluster state:\n{}\n{}", diff --git a/server/src/test/java/org/elasticsearch/index/store/ExceptionRetryIT.java b/server/src/test/java/org/elasticsearch/index/store/ExceptionRetryIT.java index 91e73e53ebc15..4e390b275a8c7 100644 --- a/server/src/test/java/org/elasticsearch/index/store/ExceptionRetryIT.java +++ b/server/src/test/java/org/elasticsearch/index/store/ExceptionRetryIT.java @@ -112,9 +112,9 @@ public void testRetryDueToExceptionOnNetworkLayer() throws ExecutionException, I BulkResponse response = bulkBuilder.get(); if (response.hasFailures()) { - for (BulkItemResponse singleIndexRespons : response.getItems()) { - if (singleIndexRespons.isFailed()) { - fail("None of the bulk items should fail but got " + singleIndexRespons.getFailureMessage()); + for (BulkItemResponse singleIndexResponse : response.getItems()) { + if (singleIndexResponse.isFailed()) { + fail("None of the bulk items should fail but got " + singleIndexResponse.getFailureMessage()); } } } diff --git a/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java b/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java index 3eddeea2f2a8a..234d4005b2f61 100644 --- a/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java +++ b/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java @@ -1141,7 +1141,7 @@ public void testSyncUpTo() throws IOException { assertTrue("at least one operation pending", 
translog.syncNeeded()); assertTrue("this operation has not been synced", translog.ensureSynced(location)); // we are the last location so everything should be synced - assertFalse("the last call to ensureSycned synced all previous ops", translog.syncNeeded()); + assertFalse("the last call to ensureSynced synced all previous ops", translog.syncNeeded()); seqNo = ++count; translog.add(new Translog.Index("test", "" + op, seqNo, primaryTerm.get(), Integer.toString(seqNo).getBytes(Charset.forName("UTF-8")))); @@ -1182,7 +1182,7 @@ public void testSyncUpToStream() throws IOException { assertTrue("at least one operation pending", translog.syncNeeded()); assertTrue("this operation has not been synced", translog.ensureSynced(locations.stream())); // we are the last location so everything should be synced - assertFalse("the last call to ensureSycned synced all previous ops", translog.syncNeeded()); + assertFalse("the last call to ensureSynced synced all previous ops", translog.syncNeeded()); } else if (rarely()) { rollAndCommit(translog); // not syncing now @@ -2229,7 +2229,7 @@ public void testRecoveryFromAFutureGenerationCleansUp() throws IOException { } } translog.rollGeneration(); - long comittedGeneration = randomLongBetween(2, translog.currentFileGeneration()); + long committedGeneration = randomLongBetween(2, translog.currentFileGeneration()); for (int op = translogOperations / 2; op < translogOperations; op++) { translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8")))); @@ -2241,8 +2241,8 @@ public void testRecoveryFromAFutureGenerationCleansUp() throws IOException { translog.close(); TranslogConfig config = translog.getConfig(); final TranslogDeletionPolicy deletionPolicy = new TranslogDeletionPolicy(-1, -1); - deletionPolicy.setTranslogGenerationOfLastCommit(randomLongBetween(comittedGeneration, Long.MAX_VALUE)); - deletionPolicy.setMinTranslogGenerationForRecovery(comittedGeneration); + 
deletionPolicy.setTranslogGenerationOfLastCommit(randomLongBetween(committedGeneration, Long.MAX_VALUE)); + deletionPolicy.setMinTranslogGenerationForRecovery(committedGeneration); translog = new Translog(config, translog.getTranslogUUID(), deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get); assertThat(translog.getMinFileGeneration(), equalTo(1L)); @@ -2251,7 +2251,7 @@ public void testRecoveryFromAFutureGenerationCleansUp() throws IOException { assertFileIsPresent(translog, gen); } translog.trimUnreferencedReaders(); - for (long gen = 1; gen < comittedGeneration; gen++) { + for (long gen = 1; gen < committedGeneration; gen++) { assertFileDeleted(translog, gen); } } @@ -2265,7 +2265,7 @@ public void testRecoveryFromFailureOnTrimming() throws IOException { final FailSwitch fail = new FailSwitch(); fail.failNever(); final TranslogConfig config = getTranslogConfig(tempDir); - final long comittedGeneration; + final long committedGeneration; final String translogUUID; try (Translog translog = getFailableTranslog(fail, config)) { final TranslogDeletionPolicy deletionPolicy = translog.getDeletionPolicy(); @@ -2282,7 +2282,7 @@ public void testRecoveryFromFailureOnTrimming() throws IOException { } } translog.rollGeneration(); - comittedGeneration = randomLongBetween(2, translog.currentFileGeneration()); + committedGeneration = randomLongBetween(2, translog.currentFileGeneration()); for (int op = translogOperations / 2; op < translogOperations; op++) { translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8")))); @@ -2290,8 +2290,8 @@ public void testRecoveryFromFailureOnTrimming() throws IOException { translog.rollGeneration(); } } - deletionPolicy.setTranslogGenerationOfLastCommit(randomLongBetween(comittedGeneration, translog.currentFileGeneration())); - deletionPolicy.setMinTranslogGenerationForRecovery(comittedGeneration); + 
deletionPolicy.setTranslogGenerationOfLastCommit(randomLongBetween(committedGeneration, translog.currentFileGeneration())); + deletionPolicy.setMinTranslogGenerationForRecovery(committedGeneration); fail.failRandomly(); try { translog.trimUnreferencedReaders(); @@ -2300,16 +2300,16 @@ public void testRecoveryFromFailureOnTrimming() throws IOException { } } final TranslogDeletionPolicy deletionPolicy = new TranslogDeletionPolicy(-1, -1); - deletionPolicy.setTranslogGenerationOfLastCommit(randomLongBetween(comittedGeneration, Long.MAX_VALUE)); - deletionPolicy.setMinTranslogGenerationForRecovery(comittedGeneration); + deletionPolicy.setTranslogGenerationOfLastCommit(randomLongBetween(committedGeneration, Long.MAX_VALUE)); + deletionPolicy.setMinTranslogGenerationForRecovery(committedGeneration); try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) { // we don't know when things broke exactly assertThat(translog.getMinFileGeneration(), greaterThanOrEqualTo(1L)); - assertThat(translog.getMinFileGeneration(), lessThanOrEqualTo(comittedGeneration)); + assertThat(translog.getMinFileGeneration(), lessThanOrEqualTo(committedGeneration)); assertFilePresences(translog); translog.trimUnreferencedReaders(); - assertThat(translog.getMinFileGeneration(), equalTo(comittedGeneration)); + assertThat(translog.getMinFileGeneration(), equalTo(committedGeneration)); assertFilePresences(translog); } } @@ -2588,7 +2588,7 @@ public void testRecoverWithUnbackedNextGenAndFutureFile() throws IOException { for (int i = 0; i < 1; i++) { Translog.Operation next = snapshot.next(); assertNotNull("operation " + i + " must be non-null", next); - assertEquals("payload missmatch", i, Integer.parseInt(next.getSource().source.utf8ToString())); + assertEquals("payload mismatch", i, Integer.parseInt(next.getSource().source.utf8ToString())); } } tlog.add(new Translog.Index("test", "" + 1, 1, primaryTerm.get(), diff --git 
a/server/src/test/java/org/elasticsearch/indices/IndicesRequestCacheTests.java b/server/src/test/java/org/elasticsearch/indices/IndicesRequestCacheTests.java index e27aefdf13fff..09e371761d6f2 100644 --- a/server/src/test/java/org/elasticsearch/indices/IndicesRequestCacheTests.java +++ b/server/src/test/java/org/elasticsearch/indices/IndicesRequestCacheTests.java @@ -311,7 +311,7 @@ public void testClearAllEntityIdentity() throws Exception { assertEquals("baz", value3.streamInput().readString()); assertEquals(3, cache.count()); final long hitCount = requestCacheStats.stats().getHitCount(); - // clear all for the indexShard Idendity even though is't still open + // clear all for the indexShard Identity even though it's still open cache.clear(randomFrom(entity, secondEntity)); cache.cleanCache(); assertEquals(1, cache.count()); diff --git a/server/src/test/java/org/elasticsearch/indices/mapping/LegacyUpdateMappingIntegrationIT.java b/server/src/test/java/org/elasticsearch/indices/mapping/LegacyUpdateMappingIntegrationIT.java index 1bf95f612ce9f..099062d61911b 100644 --- a/server/src/test/java/org/elasticsearch/indices/mapping/LegacyUpdateMappingIntegrationIT.java +++ b/server/src/test/java/org/elasticsearch/indices/mapping/LegacyUpdateMappingIntegrationIT.java @@ -182,7 +182,7 @@ public void testUpdateDefaultMappingSettings() throws Exception { { mappingBuilder.startObject(MapperService.DEFAULT_MAPPING); { - mappingBuilder.startObject("properites"); + mappingBuilder.startObject("properties"); { mappingBuilder.startObject("f"); { diff --git a/server/src/test/java/org/elasticsearch/indices/settings/PrivateSettingsIT.java b/server/src/test/java/org/elasticsearch/indices/settings/PrivateSettingsIT.java index 08f45eac5be64..c2a48e07c4073 100644 --- a/server/src/test/java/org/elasticsearch/indices/settings/PrivateSettingsIT.java +++ b/server/src/test/java/org/elasticsearch/indices/settings/PrivateSettingsIT.java @@ -68,7 +68,7 @@ public void 
testUpdatePrivateIndexSettingViaSettingsAPI() { assertNull(responseAfterAttemptedUpdate.getSetting("test", "index.private")); } - public void testUpdatePrivatelIndexSettingViaDedicatedAPI() { + public void testUpdatePrivateIndexSettingViaDedicatedAPI() { createIndex("test"); client().execute( InternalOrPrivateSettingsPlugin.UpdateInternalOrPrivateAction.INSTANCE, diff --git a/server/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java b/server/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java index fb3eac28b6793..9758f3acf2f93 100644 --- a/server/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java +++ b/server/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java @@ -422,7 +422,7 @@ public void testOpenCloseUpdateSettings() throws Exception { .execute() .actionGet() ); - assertThat(ex.getMessage(), containsString("final test setting [index.final], not updateable")); + assertThat(ex.getMessage(), containsString("final test setting [index.final], not updatable")); indexMetaData = client().admin().cluster().prepareState().execute().actionGet().getState().metaData().index("test"); assertThat(indexMetaData.getSettings().get("index.refresh_interval"), equalTo("1s")); assertThat(indexMetaData.getSettings().get("index.final"), nullValue()); diff --git a/server/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java b/server/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java index a0e68b560ee1a..66d6b41863a50 100644 --- a/server/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java +++ b/server/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java @@ -77,7 +77,7 @@ protected Settings nodeSettings(int nodeOrdinal) { // simplify this and only use return Settings.builder().put(super.nodeSettings(nodeOrdinal)).put(Environment.PATH_DATA_SETTING.getKey(), "") // by default this value is 1 sec in tests (30 sec in 
practice) but we adding disruption here // which is between 1 and 2 sec can cause each of the shard deletion requests to timeout. - // to prevent this we are setting the timeout here to something highish ie. the default in practice + // to prevent this we are setting the timeout here to something highish i.e. the default in practice .put(IndicesStore.INDICES_STORE_DELETE_SHARD_TIMEOUT.getKey(), new TimeValue(30, TimeUnit.SECONDS)) .build(); } diff --git a/server/src/test/java/org/elasticsearch/ingest/ConfigurationUtilsTests.java b/server/src/test/java/org/elasticsearch/ingest/ConfigurationUtilsTests.java index 9111658e49ca8..8979ac0a289d5 100644 --- a/server/src/test/java/org/elasticsearch/ingest/ConfigurationUtilsTests.java +++ b/server/src/test/java/org/elasticsearch/ingest/ConfigurationUtilsTests.java @@ -137,9 +137,9 @@ public void testReadProcessors() throws Exception { unknownTaggedConfig = new HashMap<>(); unknownTaggedConfig.put("tag", "my_unknown"); config2.add(Collections.singletonMap("unknown_processor", unknownTaggedConfig)); - Map secondUnknonwTaggedConfig = new HashMap<>(); - secondUnknonwTaggedConfig.put("tag", "my_second_unknown"); - config2.add(Collections.singletonMap("second_unknown_processor", secondUnknonwTaggedConfig)); + Map secondUnknownTaggedConfig = new HashMap<>(); + secondUnknownTaggedConfig.put("tag", "my_second_unknown"); + config2.add(Collections.singletonMap("second_unknown_processor", secondUnknownTaggedConfig)); e = expectThrows( ElasticsearchParseException.class, () -> ConfigurationUtils.readProcessorConfigs(config2, scriptService, registry) diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsIT.java index a80aa4142ebe4..23842a3f9df55 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsIT.java +++ 
b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsIT.java @@ -692,7 +692,7 @@ public void testSingleValuedFieldOrderedByNonMetricsOrMultiBucketSubAggregation( } } - public void testSingleValuedFieldOrderedByMultiValuedSubAggregationWithUknownMetric() throws Exception { + public void testSingleValuedFieldOrderedByMultiValuedSubAggregationWithUnknownMetric() throws Exception { for (String index : Arrays.asList("idx", "idx_unmapped")) { try { client().prepareSearch(index) diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/HistogramIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/HistogramIT.java index bee32d571b69f..5cf4bcac2818a 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/HistogramIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/HistogramIT.java @@ -1070,7 +1070,7 @@ public void testEmptyWithExtendedBounds() throws Exception { /** * see issue #9634, negative interval in histogram should raise exception */ - public void testExeptionOnNegativerInterval() { + public void testExceptionOnNegativeInterval() { try { client().prepareSearch("empty_bucket_idx") .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(-1).minDocCount(0)).get(); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/LongTermsIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/LongTermsIT.java index 481050acee498..6b704a6711ad9 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/LongTermsIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/LongTermsIT.java @@ -690,7 +690,7 @@ public void testSingleValuedFieldOrderedByNonMetricsOrMultiBucketSubAggregation( } } - public void testSingleValuedFieldOrderedByMultiValuedSubAggregationWithUknownMetric() throws Exception { + public void 
testSingleValuedFieldOrderedByMultiValuedSubAggregationWithUnknownMetric() throws Exception { for (String index : Arrays.asList("idx", "idx_unmapped")) { try { client().prepareSearch(index) diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/NestedIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/NestedIT.java index d68c85ab652ae..e818f18a69793 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/NestedIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/NestedIT.java @@ -414,7 +414,7 @@ public void testParentFilterResolvedCorrectly() throws Exception { terms("startDate").field("dates.month.start").subAggregation( terms("endDate").field("dates.month.end").subAggregation( terms("period").field("dates.month.label").subAggregation( - nested("ctxt_idfier_nested", "comments") + nested("ctxt_identifier_nested", "comments") .subAggregation(filter("comment_filter", termQuery("comments.identifier", "29111")) .subAggregation(nested("nested_tags", "comments.tags") .subAggregation( @@ -439,7 +439,7 @@ public void testParentFilterResolvedCorrectly() throws Exception { Terms period = bucket.getAggregations().get("period"); bucket = period.getBucketByKey("2014-11"); assertThat(bucket.getDocCount(), equalTo(1L)); - Nested comments = bucket.getAggregations().get("ctxt_idfier_nested"); + Nested comments = bucket.getAggregations().get("ctxt_identifier_nested"); assertThat(comments.getDocCount(), equalTo(2L)); Filter filter = comments.getAggregations().get("comment_filter"); assertThat(filter.getDocCount(), equalTo(1L)); @@ -456,7 +456,7 @@ public void testParentFilterResolvedCorrectly() throws Exception { period = bucket.getAggregations().get("period"); bucket = period.getBucketByKey("2014-12"); assertThat(bucket.getDocCount(), equalTo(1L)); - comments = bucket.getAggregations().get("ctxt_idfier_nested"); + comments = bucket.getAggregations().get("ctxt_identifier_nested"); 
assertThat(comments.getDocCount(), equalTo(2L)); filter = comments.getAggregations().get("comment_filter"); assertThat(filter.getDocCount(), equalTo(1L)); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsIT.java index 274fb2d4ffd87..11eed6f90e739 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsIT.java @@ -842,7 +842,7 @@ public void testSingleValuedFieldOrderedByNonMetricsOrMultiBucketSubAggregation( } } - public void testSingleValuedFieldOrderedByMultiValuedSubAggregationWithUknownMetric() throws Exception { + public void testSingleValuedFieldOrderedByMultiValuedSubAggregationWithUnknownMetric() throws Exception { for (String index : Arrays.asList("idx", "idx_unmapped")) { try { SearchResponse response = client() diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java index ce8d9c2da834f..3e50659f997ad 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java @@ -293,7 +293,7 @@ public void testBasics() throws Exception { assertThat(terms.getName(), equalTo("terms")); assertThat(terms.getBuckets().size(), equalTo(5)); - long higestSortValue = 0; + long highestSortValue = 0; for (int i = 0; i < 5; i++) { Terms.Bucket bucket = terms.getBucketByKey("val" + i); assertThat(bucket, notNullValue()); @@ -303,10 +303,10 @@ public void testBasics() throws Exception { SearchHits hits = topHits.getHits(); assertThat(hits.getTotalHits().value, equalTo(10L)); assertThat(hits.getHits().length, equalTo(3)); - higestSortValue += 10; - assertThat((Long) 
hits.getAt(0).getSortValues()[0], equalTo(higestSortValue)); - assertThat((Long) hits.getAt(1).getSortValues()[0], equalTo(higestSortValue - 1)); - assertThat((Long) hits.getAt(2).getSortValues()[0], equalTo(higestSortValue - 2)); + highestSortValue += 10; + assertThat((Long) hits.getAt(0).getSortValues()[0], equalTo(highestSortValue)); + assertThat((Long) hits.getAt(1).getSortValues()[0], equalTo(highestSortValue - 1)); + assertThat((Long) hits.getAt(2).getSortValues()[0], equalTo(highestSortValue - 2)); assertThat(hits.getAt(0).getSourceAsMap().size(), equalTo(4)); } @@ -517,7 +517,7 @@ public void testSortByBucket() throws Exception { assertThat(terms.getName(), equalTo("terms")); assertThat(terms.getBuckets().size(), equalTo(5)); - long higestSortValue = 50; + long highestSortValue = 50; int currentBucket = 4; for (Terms.Bucket bucket : terms.getBuckets()) { assertThat(key(bucket), equalTo("val" + currentBucket--)); @@ -526,12 +526,12 @@ public void testSortByBucket() throws Exception { SearchHits hits = topHits.getHits(); assertThat(hits.getTotalHits().value, equalTo(10L)); assertThat(hits.getHits().length, equalTo(3)); - assertThat(hits.getAt(0).getSortValues()[0], equalTo(higestSortValue)); - assertThat(hits.getAt(1).getSortValues()[0], equalTo(higestSortValue - 1)); - assertThat(hits.getAt(2).getSortValues()[0], equalTo(higestSortValue - 2)); + assertThat(hits.getAt(0).getSortValues()[0], equalTo(highestSortValue)); + assertThat(hits.getAt(1).getSortValues()[0], equalTo(highestSortValue - 1)); + assertThat(hits.getAt(2).getSortValues()[0], equalTo(highestSortValue - 2)); Max max = bucket.getAggregations().get("max_sort"); - assertThat(max.getValue(), equalTo(((Long) higestSortValue).doubleValue())); - higestSortValue -= 10; + assertThat(max.getValue(), equalTo(((Long) highestSortValue).doubleValue())); + highestSortValue -= 10; } } diff --git a/server/src/test/java/org/elasticsearch/search/basic/SearchWithRandomIOExceptionsIT.java 
b/server/src/test/java/org/elasticsearch/search/basic/SearchWithRandomIOExceptionsIT.java index 3d39da60254fa..f4708bf81b685 100644 --- a/server/src/test/java/org/elasticsearch/search/basic/SearchWithRandomIOExceptionsIT.java +++ b/server/src/test/java/org/elasticsearch/search/basic/SearchWithRandomIOExceptionsIT.java @@ -127,7 +127,7 @@ public void testRandomDirectoryIOExceptions() throws IOException, InterruptedExc if (clusterHealthResponse.isTimedOut()) { /* some seeds just won't let you create the index at all and we enter a ping-pong mode * trying one node after another etc. that is ok but we need to make sure we don't wait - * forever when indexing documents so we set numDocs = 1 and expecte all shards to fail + * forever when indexing documents so we set numDocs = 1 and expect all shards to fail * when we search below.*/ logger.info("ClusterHealth timed out - only index one doc and expect searches to fail"); numDocs = 1; diff --git a/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilderTests.java b/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilderTests.java index 46bca911e9c94..6084abcc28881 100644 --- a/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilderTests.java @@ -157,7 +157,7 @@ public void testFromXContent() throws IOException { /** * test that unknown array fields cause exception */ - public void testUnknownArrayNameExpection() throws IOException { + public void testUnknownArrayNameException() throws IOException { { XContentParseException e = expectParseThrows(XContentParseException.class, "{\n" + " \"bad_fieldname\" : [ \"field1\" 1 \"field2\" ]\n" + @@ -188,7 +188,7 @@ private T expectParseThrows(Class exceptionClass, Strin /** * test that unknown field name cause exception */ - public void testUnknownFieldnameExpection() throws IOException 
{ + public void testUnknownFieldnameException() throws IOException { { XContentParseException e = expectParseThrows(XContentParseException.class, "{\n" + " \"bad_fieldname\" : \"value\"\n" + @@ -213,7 +213,7 @@ public void testUnknownFieldnameExpection() throws IOException { /** * test that unknown field name cause exception */ - public void testUnknownObjectFieldnameExpection() throws IOException { + public void testUnknownObjectFieldnameException() throws IOException { { XContentParseException e = expectParseThrows(XContentParseException.class, "{\n" + " \"bad_fieldname\" : { \"field\" : \"value\" }\n \n" + @@ -383,7 +383,7 @@ private static Field getFieldBuilderByName(HighlightBuilder highlightBuilder, St } /** - * `tags_schema` is not produced by toXContent in the builder but should be parseable, so this + * `tags_schema` is not produced by toXContent in the builder but should be parsable, so this * adds a simple json test for this. */ public void testParsingTagsSchema() throws IOException { @@ -412,10 +412,10 @@ public void testParsingTagsSchema() throws IOException { highlightBuilder.postTags()); XContentParseException e = expectParseThrows(XContentParseException.class, "{\n" + - " \"tags_schema\" : \"somthing_else\"\n" + + " \"tags_schema\" : \"something_else\"\n" + "}\n"); assertThat(e.getMessage(), containsString("[highlight] failed to parse field [tags_schema]")); - assertEquals("Unknown tag schema [somthing_else]", e.getCause().getMessage()); + assertEquals("Unknown tag schema [something_else]", e.getCause().getMessage()); } } diff --git a/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java b/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java index 03e2a8e8248b9..5906ba5d51687 100644 --- a/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java +++ 
b/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java @@ -333,29 +333,29 @@ public void testEnsureNoNegativeOffsets() throws Exception { "long_term", "type=text,term_vector=with_positions_offsets")); client().prepareIndex("test", "type1", "1") - .setSource("no_long_term", "This is a test where foo is highlighed and should be highlighted", - "long_term", "This is a test thisisaverylongwordandmakessurethisfails where foo is highlighed " + .setSource("no_long_term", "This is a test where foo is highlighted and should be highlighted", + "long_term", "This is a test thisisaverylongwordandmakessurethisfails where foo is highlighted " + "and should be highlighted") .get(); refresh(); SearchResponse search = client().prepareSearch() - .setQuery(matchQuery("long_term", "thisisaverylongwordandmakessurethisfails foo highlighed")) + .setQuery(matchQuery("long_term", "thisisaverylongwordandmakessurethisfails foo highlighted")) .highlighter(new HighlightBuilder().field("long_term", 18, 1).highlighterType("fvh")) .get(); assertHighlight(search, 0, "long_term", 0, 1, equalTo("thisisaverylongwordandmakessurethisfails")); search = client().prepareSearch() - .setQuery(matchPhraseQuery("no_long_term", "test foo highlighed").slop(3)) + .setQuery(matchPhraseQuery("no_long_term", "test foo highlighted").slop(3)) .highlighter(new HighlightBuilder().field("no_long_term", 18, 1).highlighterType("fvh").postTags("
    ").preTags("")) .get(); assertNotHighlighted(search, 0, "no_long_term"); search = client().prepareSearch() - .setQuery(matchPhraseQuery("no_long_term", "test foo highlighed").slop(3)) + .setQuery(matchPhraseQuery("no_long_term", "test foo highlighted").slop(3)) .highlighter(new HighlightBuilder().field("no_long_term", 30, 1).highlighterType("fvh").postTags("").preTags("")) .get(); - assertHighlight(search, 0, "no_long_term", 0, 1, equalTo("a test where foo is highlighed and")); + assertHighlight(search, 0, "no_long_term", 0, 1, equalTo("a test where foo is highlighted and")); } public void testSourceLookupHighlightingUsingPlainHighlighter() throws Exception { @@ -1616,7 +1616,7 @@ public void testResetTwice() throws Exception { SearchResponse response = client().prepareSearch("test") .setQuery(QueryBuilders.matchQuery("text", "test")) .highlighter(new HighlightBuilder().field("text")).get(); - // Mock tokenizer will throw an exception if it is resetted twice + // Mock tokenizer will throw an exception if it is reset twice assertHitCount(response, 1L); } @@ -2630,7 +2630,7 @@ private void phraseBoostTestCase(String highlighterType) { public void testGeoFieldHighlightingWithDifferentHighlighters() throws IOException { // check that we do not get an exception for geo_point fields in case someone tries to highlight - // it accidentially with a wildcard + // it accidentally with a wildcard // see https://github.com/elastic/elasticsearch/issues/17537 XContentBuilder mappings = jsonBuilder(); mappings.startObject(); diff --git a/server/src/test/java/org/elasticsearch/search/fields/SearchFieldsIT.java b/server/src/test/java/org/elasticsearch/search/fields/SearchFieldsIT.java index 5f5f742bfd630..a3261e2652f6e 100644 --- a/server/src/test/java/org/elasticsearch/search/fields/SearchFieldsIT.java +++ b/server/src/test/java/org/elasticsearch/search/fields/SearchFieldsIT.java @@ -691,7 +691,7 @@ public void testGetFieldsComplexField() throws Exception { } // see #8203 - 
public void testSingleValueFieldDatatField() throws ExecutionException, InterruptedException { + public void testSingleValueFieldDataField() throws ExecutionException, InterruptedException { assertAcked(client().admin().indices().prepareCreate("test") .addMapping("type", "test_field", "type=keyword").get()); indexRandom(true, client().prepareIndex("test", "type", "1").setSource("test_field", "foobar")); diff --git a/server/src/test/java/org/elasticsearch/search/profile/TimerTests.java b/server/src/test/java/org/elasticsearch/search/profile/TimerTests.java index fc1bd76f39ecd..b3a6d66ab0147 100644 --- a/server/src/test/java/org/elasticsearch/search/profile/TimerTests.java +++ b/server/src/test/java/org/elasticsearch/search/profile/TimerTests.java @@ -60,7 +60,7 @@ long nanoTime() { t.start(); t.stop(); assertEquals(i, t.getCount()); - // Make sure the cumulated timing is 42 times the number of calls as expected + // Make sure the accumulated timing is 42 times the number of calls as expected assertEquals(i * 42L, t.getApproximateTiming()); } } diff --git a/server/src/test/java/org/elasticsearch/search/rescore/QueryRescorerBuilderTests.java b/server/src/test/java/org/elasticsearch/search/rescore/QueryRescorerBuilderTests.java index 0f647353e95af..a93243d5da553 100644 --- a/server/src/test/java/org/elasticsearch/search/rescore/QueryRescorerBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/search/rescore/QueryRescorerBuilderTests.java @@ -212,7 +212,7 @@ public MappedFieldType fieldMapper(String name) { /** * test parsing exceptions for incorrect rescorer syntax */ - public void testUnknownFieldsExpection() throws IOException { + public void testUnknownFieldsException() throws IOException { String rescoreElement = "{\n" + " \"window_size\" : 20,\n" + diff --git a/server/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderIT.java b/server/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderIT.java index 
97d9361fc46fe..83a89036b6ead 100644 --- a/server/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderIT.java +++ b/server/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderIT.java @@ -74,11 +74,11 @@ public void testManyToManyGeoPoints() throws ExecutionException, InterruptedExce assertAcked(prepareCreate("index").setSettings(settings).addMapping("type", LOCATION_FIELD, "type=geo_point")); XContentBuilder d1Builder = jsonBuilder(); GeoPoint[] d1Points = {new GeoPoint(3, 2), new GeoPoint(4, 1)}; - createShuffeldJSONArray(d1Builder, d1Points); + createShuffledJSONArray(d1Builder, d1Points); XContentBuilder d2Builder = jsonBuilder(); GeoPoint[] d2Points = {new GeoPoint(5, 1), new GeoPoint(6, 2)}; - createShuffeldJSONArray(d2Builder, d2Points); + createShuffledJSONArray(d2Builder, d2Points); logger.info("d1: {}", d1Builder); logger.info("d2: {}", d2Builder); @@ -148,11 +148,11 @@ public void testSingeToManyAvgMedian() throws ExecutionException, InterruptedExc assertAcked(prepareCreate("index").setSettings(settings).addMapping("type", LOCATION_FIELD, "type=geo_point")); XContentBuilder d1Builder = jsonBuilder(); GeoPoint[] d1Points = {new GeoPoint(0, 1), new GeoPoint(0, 4), new GeoPoint(0, 10)}; - createShuffeldJSONArray(d1Builder, d1Points); + createShuffledJSONArray(d1Builder, d1Points); XContentBuilder d2Builder = jsonBuilder(); GeoPoint[] d2Points = {new GeoPoint(0, 1), new GeoPoint(0, 5), new GeoPoint(0, 6)}; - createShuffeldJSONArray(d2Builder, d2Points); + createShuffledJSONArray(d2Builder, d2Points); logger.info("d1: {}", d1Builder); logger.info("d2: {}", d2Builder); @@ -182,7 +182,7 @@ public void testSingeToManyAvgMedian() throws ExecutionException, InterruptedExc closeTo(GeoDistance.ARC.calculate(0, 0, 0, 5, DistanceUnit.METERS), 10d)); } - protected void createShuffeldJSONArray(XContentBuilder builder, GeoPoint[] pointsArray) throws IOException { + protected void createShuffledJSONArray(XContentBuilder builder, GeoPoint[] 
pointsArray) throws IOException { List points = new ArrayList<>(); points.addAll(Arrays.asList(pointsArray)); builder.startObject(); @@ -213,11 +213,11 @@ public void testManyToManyGeoPointsWithDifferentFormats() throws ExecutionExcept assertAcked(prepareCreate("index").setSettings(settings).addMapping("type", LOCATION_FIELD, "type=geo_point")); XContentBuilder d1Builder = jsonBuilder(); GeoPoint[] d1Points = {new GeoPoint(2.5, 1), new GeoPoint(2.75, 2), new GeoPoint(3, 3), new GeoPoint(3.25, 4)}; - createShuffeldJSONArray(d1Builder, d1Points); + createShuffledJSONArray(d1Builder, d1Points); XContentBuilder d2Builder = jsonBuilder(); GeoPoint[] d2Points = {new GeoPoint(4.5, 1), new GeoPoint(4.75, 2), new GeoPoint(5, 3), new GeoPoint(5.25, 4)}; - createShuffeldJSONArray(d2Builder, d2Points); + createShuffledJSONArray(d2Builder, d2Points); indexRandom(true, client().prepareIndex("index", "type", "d1").setSource(d1Builder), diff --git a/server/src/test/java/org/elasticsearch/search/sort/SortBuilderTests.java b/server/src/test/java/org/elasticsearch/search/sort/SortBuilderTests.java index 5f5ea5e869450..a7a139cd9b055 100644 --- a/server/src/test/java/org/elasticsearch/search/sort/SortBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/search/sort/SortBuilderTests.java @@ -108,7 +108,7 @@ public void testSingleFieldSort() throws IOException { sortBuilder = result.get(0); assertEquals(new ScoreSortBuilder(), sortBuilder); - // test two spellings for _geo_disctance + // test two spellings for _geo_distance json = "{ \"sort\" : [" + "{\"_geoDistance\" : {" + "\"pin.location\" : \"40,-70\" } }" diff --git a/server/src/test/java/org/elasticsearch/search/suggest/AbstractSuggestionBuilderTestCase.java b/server/src/test/java/org/elasticsearch/search/suggest/AbstractSuggestionBuilderTestCase.java index 9d1f01fe3289b..b7c1df6bceaf7 100644 --- a/server/src/test/java/org/elasticsearch/search/suggest/AbstractSuggestionBuilderTestCase.java +++ 
b/server/src/test/java/org/elasticsearch/search/suggest/AbstractSuggestionBuilderTestCase.java @@ -228,7 +228,7 @@ protected String[] shuffleProtectedFields() { private SB mutate(SB firstBuilder) throws IOException { SB mutation = copy(firstBuilder); assertNotSame(mutation, firstBuilder); - // change ither one of the shared SuggestionBuilder parameters, or delegate to the specific tests mutate method + // change either one of the shared SuggestionBuilder parameters, or delegate to the specific tests mutate method if (randomBoolean()) { switch (randomIntBetween(0, 5)) { case 0: diff --git a/server/src/test/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilderTests.java b/server/src/test/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilderTests.java index d54a20f66dbbd..bb579c5a743b9 100644 --- a/server/src/test/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilderTests.java @@ -195,7 +195,7 @@ protected void assertSuggestionContext(PhraseSuggestionBuilder builder, Suggesti assertOptionalEquals(builder.confidence(), phraseSuggesterCtx.confidence(), PhraseSuggestionContext.DEFAULT_CONFIDENCE); assertOptionalEquals(builder.collatePrune(), phraseSuggesterCtx.collatePrune(), PhraseSuggestionContext.DEFAULT_COLLATE_PRUNE); assertEquals(builder.separator(), phraseSuggesterCtx.separator().utf8ToString()); - assertOptionalEquals(builder.realWordErrorLikelihood(), phraseSuggesterCtx.realworldErrorLikelyhood(), + assertOptionalEquals(builder.realWordErrorLikelihood(), phraseSuggesterCtx.realworldErrorLikelihood(), PhraseSuggestionContext.DEFAULT_RWE_ERRORLIKELIHOOD); assertOptionalEquals(builder.maxErrors(), phraseSuggesterCtx.maxErrors(), PhraseSuggestionContext.DEFAULT_MAX_ERRORS); assertOptionalEquals(builder.forceUnigrams(), phraseSuggesterCtx.getRequireUnigram(), diff --git 
a/server/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java b/server/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java index 40bfaf97da8e5..efebb270917e7 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java +++ b/server/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java @@ -835,7 +835,7 @@ public void testMasterShutdownDuringSnapshot() throws Exception { assertTrue(snapshotInfo.state().completed()); }, 1, TimeUnit.MINUTES); - logger.info("--> verify that snapshot was succesful"); + logger.info("--> verify that snapshot was successful"); GetSnapshotsResponse snapshotsStatusResponse = client().admin().cluster().prepareGetSnapshots("test-repo") .setSnapshots("test-snap").get(); diff --git a/server/src/test/java/org/elasticsearch/snapshots/SnapshotTests.java b/server/src/test/java/org/elasticsearch/snapshots/SnapshotTests.java index 3d7277b9c68d3..e78af4a94ca1e 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SnapshotTests.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SnapshotTests.java @@ -53,14 +53,14 @@ public void testSerialization() throws IOException { assertThat(new Snapshot(out.bytes().streamInput()), equalTo(original)); } - public void testCreateSnapshotRequestDescrptions() { + public void testCreateSnapshotRequestDescriptions() { CreateSnapshotRequest createSnapshotRequest = new CreateSnapshotRequest(); createSnapshotRequest.snapshot("snapshot_name"); createSnapshotRequest.repository("repo_name"); assertEquals("snapshot [repo_name:snapshot_name]", createSnapshotRequest.getDescription()); } - public void testRestoreSnapshotRequestDescrptions() { + public void testRestoreSnapshotRequestDescriptions() { RestoreSnapshotRequest restoreSnapshotRequest = new RestoreSnapshotRequest(); restoreSnapshotRequest.snapshot("snapshot_name"); restoreSnapshotRequest.repository("repo_name"); diff --git 
a/server/src/test/java/org/elasticsearch/transport/TransportLoggerTests.java b/server/src/test/java/org/elasticsearch/transport/TransportLoggerTests.java index ac58b0e25b91e..dd46059aa2abf 100644 --- a/server/src/test/java/org/elasticsearch/transport/TransportLoggerTests.java +++ b/server/src/test/java/org/elasticsearch/transport/TransportLoggerTests.java @@ -66,7 +66,7 @@ public void testLoggingHandler() throws IOException { ", action: cluster:monitor/stats]" + " WRITE: \\d+B"; final MockLogAppender.LoggingExpectation writeExpectation = - new MockLogAppender.PatternSeenEventExcpectation( + new MockLogAppender.PatternSeenEventExpectation( "hot threads request", TransportLogger.class.getCanonicalName(), Level.TRACE, writePattern); final String readPattern = @@ -78,7 +78,7 @@ public void testLoggingHandler() throws IOException { " READ: \\d+B"; final MockLogAppender.LoggingExpectation readExpectation = - new MockLogAppender.PatternSeenEventExcpectation( + new MockLogAppender.PatternSeenEventExpectation( "cluster monitor request", TransportLogger.class.getCanonicalName(), Level.TRACE, readPattern); appender.addExpectation(writeExpectation); diff --git a/server/src/test/java/org/elasticsearch/update/UpdateIT.java b/server/src/test/java/org/elasticsearch/update/UpdateIT.java index 05b27758ee434..f5cfd8d8a1379 100644 --- a/server/src/test/java/org/elasticsearch/update/UpdateIT.java +++ b/server/src/test/java/org/elasticsearch/update/UpdateIT.java @@ -744,7 +744,7 @@ private void waitForOutstandingRequests(TimeValue timeOut, Semaphore requestsOut } //If are no errors every request received a response otherwise the test would have timedout - //aquiring the request outstanding semaphores. + //acquiring the request outstanding semaphores. 
for (Throwable throwable : failures) { logger.info("Captured failure on concurrent update:", throwable); } diff --git a/test/framework/src/main/java/org/elasticsearch/common/bytes/AbstractBytesReferenceTestCase.java b/test/framework/src/main/java/org/elasticsearch/common/bytes/AbstractBytesReferenceTestCase.java index 1a00698325974..2af6b5a13e995 100644 --- a/test/framework/src/main/java/org/elasticsearch/common/bytes/AbstractBytesReferenceTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/common/bytes/AbstractBytesReferenceTestCase.java @@ -81,7 +81,7 @@ public void testSlice() throws IOException { } BytesRef singlePageOrNull = getSinglePageOrNull(slice); if (singlePageOrNull != null) { - // we can't assert the offset since if the length is smaller than the refercence + // we can't assert the offset since if the length is smaller than the reference // the offset can be anywhere assertEquals(sliceLength, singlePageOrNull.length); } @@ -446,7 +446,7 @@ public void testSliceArrayOffset() throws IOException { // original reference has pages assertEquals(sliceOffset % PAGE_SIZE, singlePageOrNull.offset); } else { - // orig ref has no pages ie. BytesArray + // orig ref has no pages i.e. BytesArray assertEquals(sliceOffset, singlePageOrNull.offset); } } diff --git a/test/framework/src/main/java/org/elasticsearch/common/logging/TestThreadInfoPatternConverter.java b/test/framework/src/main/java/org/elasticsearch/common/logging/TestThreadInfoPatternConverter.java index b8d90e9e3a208..a01c16e39a409 100644 --- a/test/framework/src/main/java/org/elasticsearch/common/logging/TestThreadInfoPatternConverter.java +++ b/test/framework/src/main/java/org/elasticsearch/common/logging/TestThreadInfoPatternConverter.java @@ -32,7 +32,7 @@ /** * Converts {@code %test_thread_info} in log4j patterns into information - * based on the loggin thread's name. If that thread is part of an + * based on the logging thread's name. 
If that thread is part of an * {@link ESIntegTestCase} then this information is the node name. */ @Plugin(category = PatternConverter.CATEGORY, name = "TestInfoPatternConverter") diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/FieldTypeTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/FieldTypeTestCase.java index 8914bad5c4102..8130e845f9f61 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/FieldTypeTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/FieldTypeTestCase.java @@ -38,12 +38,12 @@ public abstract class FieldTypeTestCase extends ESTestCase { public abstract static class Modifier { /** The name of the property that is being modified. Used in test failure messages. */ public final String property; - /** True if this property is updateable, false otherwise. */ - public final boolean updateable; + /** True if this property is updatable, false otherwise. */ + public final boolean updatable; - public Modifier(String property, boolean updateable) { + public Modifier(String property, boolean updatable) { this.property = property; - this.updateable = updateable; + this.updatable = updatable; } /** Modifies the property */ @@ -344,7 +344,7 @@ public void testCheckCompatibility() { ft2 = createNamedDefaultFieldType(); modifier.normalizeOther(ft1); modifier.modify(ft2); - if (modifier.updateable) { + if (modifier.updatable) { assertCompatible(modifier.property, ft1, ft2); assertCompatible(modifier.property, ft2, ft1); // always symmetric when not strict } else { diff --git a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java index 84e88b6accc5b..27dc2ab937cfb 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java @@ -374,7 
+374,7 @@ protected IndexShard newShard(ShardRouting routing, ShardPath shardPath, IndexMe /** * Takes an existing shard, closes it and starts a new initialing shard at the same location * - * @param listeners new listerns to use for the newly created shard + * @param listeners new listeners to use for the newly created shard */ protected IndexShard reinitShard(IndexShard current, IndexingOperationListener... listeners) throws IOException { final ShardRouting shardRouting = current.routingEntry(); @@ -387,7 +387,7 @@ protected IndexShard reinitShard(IndexShard current, IndexingOperationListener.. * Takes an existing shard, closes it and starts a new initialing shard at the same location * * @param routing the shard routing to use for the newly created shard. - * @param listeners new listerns to use for the newly created shard + * @param listeners new listeners to use for the newly created shard */ protected IndexShard reinitShard(IndexShard current, ShardRouting routing, IndexingOperationListener... listeners) throws IOException { return reinitShard(current, routing, current.engineFactory, listeners); @@ -397,7 +397,7 @@ protected IndexShard reinitShard(IndexShard current, ShardRouting routing, Index * Takes an existing shard, closes it and starts a new initialing shard at the same location * * @param routing the shard routing to use for the newly created shard. 
- * @param listeners new listerns to use for the newly created shard + * @param listeners new listeners to use for the newly created shard * @param engineFactory the engine factory for the new shard */ protected IndexShard reinitShard(IndexShard current, ShardRouting routing, EngineFactory engineFactory, @@ -669,8 +669,8 @@ public static List getDocIdAndSeqNos(final IndexShard shard) return EngineTestCase.getDocIds(shard.getEngine(), true); } - protected void assertDocCount(IndexShard shard, int docDount) throws IOException { - assertThat(getShardDocUIDs(shard), hasSize(docDount)); + protected void assertDocCount(IndexShard shard, int docCount) throws IOException { + assertThat(getShardDocUIDs(shard), hasSize(docCount)); } protected void assertDocs(IndexShard shard, String... ids) throws IOException { diff --git a/test/framework/src/main/java/org/elasticsearch/ingest/RandomDocumentPicks.java b/test/framework/src/main/java/org/elasticsearch/ingest/RandomDocumentPicks.java index 58eb1df129291..960a2d28653c6 100644 --- a/test/framework/src/main/java/org/elasticsearch/ingest/RandomDocumentPicks.java +++ b/test/framework/src/main/java/org/elasticsearch/ingest/RandomDocumentPicks.java @@ -139,7 +139,7 @@ public static IngestDocument randomIngestDocument(Random random, Map localNodeFactory, ClusterSettings clusterSettings, Set taskHeaders) { // we use the MockTransportService.TestPlugin class as a marker to create a network - // module with this MockNetworkService. NetworkService is such an integral part of the systme + // module with this MockNetworkService. NetworkService is such an integral part of the system // we don't allow to plug it in from plugins or anything. this is a test-only override and // can't be done in a production env. 
if (getPluginsService().filterPlugins(MockTransportService.TestPlugin.class).isEmpty()) { diff --git a/test/framework/src/main/java/org/elasticsearch/search/aggregations/BaseAggregationTestCase.java b/test/framework/src/main/java/org/elasticsearch/search/aggregations/BaseAggregationTestCase.java index ffd19d8e94d6e..66188e57f4337 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/aggregations/BaseAggregationTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/search/aggregations/BaseAggregationTestCase.java @@ -213,7 +213,7 @@ protected void randomFieldOrScript(ValuesSourceAggregationBuilder factory, factory.script(mockScript("doc[" + field + "] + 1")); break; default: - throw new AssertionError("Unknow random operation [" + choice + "]"); + throw new AssertionError("Unknown random operation [" + choice + "]"); } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java index 40c23b9d5b3fb..25e11a924e03c 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java @@ -170,7 +170,7 @@ public final void testUnknownObjectException() throws IOException { // Adds the alternates versions of the query too candidates.addAll(getAlternateVersions().keySet()); - List> testQueries = alterateQueries(candidates, getObjectsHoldingArbitraryContent()); + List> testQueries = alternateQueries(candidates, getObjectsHoldingArbitraryContent()); for (Tuple testQuery : testQueries) { boolean expectedException = testQuery.v2(); try { @@ -231,7 +231,7 @@ public final void testUnknownObjectException() throws IOException { * for the mutation. Some specific objects do not cause any exception as they can hold arbitrary content; they are passed using the * arbitraryMarkers parameter. 
*/ - static List> alterateQueries(Set queries, Set arbitraryMarkers) throws IOException { + static List> alternateQueries(Set queries, Set arbitraryMarkers) throws IOException { List> results = new ArrayList<>(); // Indicate if a part of the query can hold any arbitrary content @@ -307,7 +307,7 @@ static List> alterateQueries(Set queries, Set *
  • Take a reference documentation example. - *
  • Stick it into the createParseableQueryJson method of the respective query test. + *
  • Stick it into the createParsableQueryJson method of the respective query test. *
  • Manually check that what the QueryBuilder generates equals the input json ignoring default options. *
  • Put the manual checks into the assertQueryParsedFromJson method. - *
  • Now copy the generated json including default options into createParseableQueryJson + *
  • Now copy the generated json including default options into createParsableQueryJson *
  • By now the roundtrip check for the json should be happy. * **/ diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractXContentTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractXContentTestCase.java index 9507c5e12f8c2..ddf35ac0c8ee5 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractXContentTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractXContentTestCase.java @@ -271,8 +271,8 @@ static BytesReference insertRandomFieldsAndShuffle(BytesReference xContent, XCon } else { withRandomFields = xContent; } - XContentParser parserWithRandonFields = createParserFunction.apply(XContentFactory.xContent(xContentType), withRandomFields); - return BytesReference.bytes(ESTestCase.shuffleXContent(parserWithRandonFields, false, shuffleFieldsExceptions)); + XContentParser parserWithRandomFields = createParserFunction.apply(XContentFactory.xContent(xContentType), withRandomFields); + return BytesReference.bytes(ESTestCase.shuffleXContent(parserWithRandomFields, false, shuffleFieldsExceptions)); } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index e17377b500d6b..b9b9e78237e6b 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -1491,7 +1491,7 @@ public void indexRandom(boolean forceRefresh, boolean dummyDocuments, boolean ma for (int i = 0; i < numBogusDocs; i++) { String id = "bogus_doc_" + randomRealisticUnicodeOfLength(unicodeLen) - + Integer.toString(dummmyDocIdGenerator.incrementAndGet()); + + Integer.toString(dummyDocIdGenerator.incrementAndGet()); Map.Entry> indexAndTypes = RandomPicks.randomFrom(random, indicesAndTypes.entrySet()); String index = indexAndTypes.getKey(); String type = RandomPicks.randomFrom(random, indexAndTypes.getValue()); @@ 
-1560,7 +1560,7 @@ public void indexRandom(boolean forceRefresh, boolean dummyDocuments, boolean ma } } - private AtomicInteger dummmyDocIdGenerator = new AtomicInteger(); + private AtomicInteger dummyDocIdGenerator = new AtomicInteger(); /** Disables an index block for the specified index */ public static void disableIndexBlock(String index, String block) { diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index e6e11dacb749f..226c031e96dab 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -835,7 +835,7 @@ private final class NodeAndClient implements Closeable { this.name = name; this.originalNodeSettings = originalNodeSettings; this.nodeAndClientId = nodeAndClientId; - markNodeDataDirsAsNotEligableForWipe(node); + markNodeDataDirsAsNotEligibleForWipe(node); } Node node() { @@ -977,7 +977,7 @@ public void afterStart() { } }); closed.set(false); - markNodeDataDirsAsNotEligableForWipe(node); + markNodeDataDirsAsNotEligibleForWipe(node); } @Override @@ -1424,7 +1424,7 @@ private void markNodeDataDirsAsPendingForWipe(Node node) { } } - private void markNodeDataDirsAsNotEligableForWipe(Node node) { + private void markNodeDataDirsAsNotEligibleForWipe(Node node) { assert Thread.holdsLock(this); NodeEnvironment nodeEnv = node.getNodeEnvironment(); if (nodeEnv.hasNodeFile()) { @@ -1712,7 +1712,7 @@ public void fullRestart() throws Exception { } /** - * Restarts all nodes in a rolling restart fashion ie. only restarts on node a time. + * Restarts all nodes in a rolling restart fashion i.e. only restarts on node a time. 
*/ public synchronized void rollingRestart(RestartCallback callback) throws Exception { int numNodesRestarted = 0; diff --git a/test/framework/src/main/java/org/elasticsearch/test/MockLogAppender.java b/test/framework/src/main/java/org/elasticsearch/test/MockLogAppender.java index ccb010e2a915e..c91d03c71c484 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/MockLogAppender.java +++ b/test/framework/src/main/java/org/elasticsearch/test/MockLogAppender.java @@ -154,7 +154,7 @@ public boolean innerMatch(final LogEvent event) { } - public static class PatternSeenEventExcpectation implements LoggingExpectation { + public static class PatternSeenEventExpectation implements LoggingExpectation { protected final String name; protected final String logger; @@ -162,7 +162,7 @@ public static class PatternSeenEventExcpectation implements LoggingExpectation { protected final String pattern; volatile boolean saw; - public PatternSeenEventExcpectation(String name, String logger, Level level, String pattern) { + public PatternSeenEventExpectation(String name, String logger, Level level, String pattern) { this.name = name; this.logger = logger; this.level = level; diff --git a/test/framework/src/main/java/org/elasticsearch/test/VersionUtils.java b/test/framework/src/main/java/org/elasticsearch/test/VersionUtils.java index 84c480b8d510b..ca84f73a4a5f0 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/VersionUtils.java +++ b/test/framework/src/main/java/org/elasticsearch/test/VersionUtils.java @@ -43,7 +43,7 @@ public class VersionUtils { * rules here match up with the rules in gradle then this should * produce sensible results. 
* @return a tuple containing versions with backwards compatibility - * guarantees in v1 and versions without the guranteees in v2 + * guarantees in v1 and versions without the guarantees in v2 */ static Tuple, List> resolveReleasedVersions(Version current, Class versionClass) { // group versions into major version diff --git a/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java b/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java index 52b086db338f3..785e63a4eaf64 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java +++ b/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java @@ -53,13 +53,13 @@ public final class MockEngineSupport { /** * Allows tests to wrap an index reader randomly with a given ratio. This - * is disabled by default ie. {@code 0.0d} since reader wrapping is insanely + * is disabled by default i.e. {@code 0.0d} since reader wrapping is insanely * slow if {@link AssertingDirectoryReader} is used. */ public static final Setting WRAP_READER_RATIO = Setting.doubleSetting("index.engine.mock.random.wrap_reader_ratio", 0.0d, 0.0d, Property.IndexScope); /** - * Allows tests to prevent an engine from being flushed on close ie. to test translog recovery... + * Allows tests to prevent an engine from being flushed on close i.e. to test translog recovery... 
*/ public static final Setting DISABLE_FLUSH_ON_CLOSE = Setting.boolSetting("index.mock.disable_flush_on_close", false, Property.IndexScope); diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java index 02918ea78714e..b337966a57f1d 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java @@ -429,7 +429,7 @@ protected boolean preserveRollupJobsUponCompletion() { /** * Returns whether to preserve ILM Policies of this test. Defaults to not - * preserviing them. Only runs at all if xpack is installed on the cluster + * preserving them. Only runs at all if xpack is installed on the cluster * being tested. */ protected boolean preserveILMPoliciesUponCompletion() { diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestResponseException.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestResponseException.java index 8874570b73ccc..49a1a142bada0 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestResponseException.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestResponseException.java @@ -46,7 +46,7 @@ public ClientYamlTestResponse getRestTestResponse() { } /** - * Exposes the origina {@link ResponseException}. Note that the entity will always be null as it + * Exposes the original {@link ResponseException}. Note that the entity will always be null as it * gets eagerly consumed and exposed through {@link #getRestTestResponse()}. 
*/ public ResponseException getResponseException() { diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java index f04bead4fbfa1..41d09d66c35cc 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java @@ -181,7 +181,7 @@ public static Iterable createParameters() throws Exception { /** * Create parameters for this parameterized test. */ - public static Iterable createParameters(NamedXContentRegistry executeableSectionRegistry) throws Exception { + public static Iterable createParameters(NamedXContentRegistry executableSectionRegistry) throws Exception { String[] paths = resolvePathsProperty(REST_TESTS_SUITE, ""); // default to all tests under the test root Map> yamlSuites = loadSuites(paths); List suites = new ArrayList<>(); @@ -190,7 +190,7 @@ public static Iterable createParameters(NamedXContentRegistry executea for (String api : yamlSuites.keySet()) { List yamlFiles = new ArrayList<>(yamlSuites.get(api)); for (Path yamlFile : yamlFiles) { - ClientYamlTestSuite suite = ClientYamlTestSuite.parse(executeableSectionRegistry, api, yamlFile); + ClientYamlTestSuite suite = ClientYamlTestSuite.parse(executableSectionRegistry, api, yamlFile); suites.add(suite); try { suite.validate(); diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuite.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuite.java index eccf99a22607e..de5425587c30e 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuite.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuite.java @@ -46,7 +46,7 @@ * Supports a setup section and multiple 
test sections. */ public class ClientYamlTestSuite { - public static ClientYamlTestSuite parse(NamedXContentRegistry executeableSectionRegistry, String api, Path file) throws IOException { + public static ClientYamlTestSuite parse(NamedXContentRegistry executableSectionRegistry, String api, Path file) throws IOException { if (!Files.isRegularFile(file)) { throw new IllegalArgumentException(file.toAbsolutePath() + " is not a file"); } @@ -70,7 +70,7 @@ public static ClientYamlTestSuite parse(NamedXContentRegistry executeableSection } } - try (XContentParser parser = YamlXContent.yamlXContent.createParser(executeableSectionRegistry, + try (XContentParser parser = YamlXContent.yamlXContent.createParser(executableSectionRegistry, LoggingDeprecationHandler.INSTANCE, Files.newInputStream(file))) { return parse(api, filename, parser); } catch(Exception e) { diff --git a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java index 43542d48a6c39..b4661611a9c0b 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java @@ -688,7 +688,7 @@ public void testConcurrentSendRespondAndDisconnect() throws BrokenBarrierExcepti final CyclicBarrier go = new CyclicBarrier(halfSenders * 2 + 1); final CountDownLatch done = new CountDownLatch(halfSenders * 2); for (int i = 0; i < halfSenders; i++) { - // B senders just generated activity so serciveA can respond, we don't test what's going on there + // B senders just generated activity so serviceA can respond, we don't test what's going on there final int sender = i; threadPool.executor(ThreadPool.Names.GENERIC).execute(new AbstractRunnable() { @Override @@ -1929,7 +1929,7 @@ public void testTimeoutPerConnection() throws IOException { assumeTrue("Works only on 
BSD network stacks and apparently windows", Constants.MAC_OS_X || Constants.FREE_BSD || Constants.WINDOWS); try (ServerSocket socket = new MockServerSocket()) { - // note - this test uses backlog=1 which is implementation specific ie. it might not work on some TCP/IP stacks + // note - this test uses backlog=1 which is implementation specific i.e. it might not work on some TCP/IP stacks // on linux (at least newer ones) the listen(addr, backlog=1) should just ignore new connections if the queue is full which // means that once we received an ACK from the client we just drop the packet on the floor (which is what we want) and we run // into a connection timeout quickly. Yet other implementations can for instance can terminate the connection within the 3 way diff --git a/test/framework/src/test/java/org/elasticsearch/ingest/IngestDocumentMatcherTests.java b/test/framework/src/test/java/org/elasticsearch/ingest/IngestDocumentMatcherTests.java index bff9a923b9f75..21cfbb44b94bd 100644 --- a/test/framework/src/test/java/org/elasticsearch/ingest/IngestDocumentMatcherTests.java +++ b/test/framework/src/test/java/org/elasticsearch/ingest/IngestDocumentMatcherTests.java @@ -35,7 +35,7 @@ public void testDifferentMapData() { sourceAndMetadata1.put("foo", "bar"); IngestDocument document1 = new IngestDocument(sourceAndMetadata1, new HashMap<>()); IngestDocument document2 = new IngestDocument(new HashMap<>(), new HashMap<>()); - assertThrowsOnComparision(document1, document2); + assertThrowsOnComparison(document1, document2); } public void testDifferentLengthListData() { @@ -44,7 +44,7 @@ public void testDifferentLengthListData() { new IngestDocument(Collections.singletonMap(rootKey, Arrays.asList("bar", "baz")), new HashMap<>()); IngestDocument document2 = new IngestDocument(Collections.singletonMap(rootKey, Collections.emptyList()), new HashMap<>()); - assertThrowsOnComparision(document1, document2); + assertThrowsOnComparison(document1, document2); } public void 
testDifferentNestedListFieldData() { @@ -53,7 +53,7 @@ public void testDifferentNestedListFieldData() { new IngestDocument(Collections.singletonMap(rootKey, Arrays.asList("bar", "baz")), new HashMap<>()); IngestDocument document2 = new IngestDocument(Collections.singletonMap(rootKey, Arrays.asList("bar", "blub")), new HashMap<>()); - assertThrowsOnComparision(document1, document2); + assertThrowsOnComparison(document1, document2); } public void testDifferentNestedMapFieldData() { @@ -62,7 +62,7 @@ public void testDifferentNestedMapFieldData() { new IngestDocument(Collections.singletonMap(rootKey, Collections.singletonMap("bar", "baz")), new HashMap<>()); IngestDocument document2 = new IngestDocument(Collections.singletonMap(rootKey, Collections.singletonMap("bar", "blub")), new HashMap<>()); - assertThrowsOnComparision(document1, document2); + assertThrowsOnComparison(document1, document2); } public void testOnTypeConflict() { @@ -72,10 +72,10 @@ public void testOnTypeConflict() { IngestDocument document2 = new IngestDocument( Collections.singletonMap(rootKey, Collections.singletonMap("blub", "blab")), new HashMap<>() ); - assertThrowsOnComparision(document1, document2); + assertThrowsOnComparison(document1, document2); } - private static void assertThrowsOnComparision(IngestDocument document1, IngestDocument document2) { + private static void assertThrowsOnComparison(IngestDocument document1, IngestDocument document2) { expectThrows(AssertionError.class, () -> assertIngestDocument(document1, document2)); expectThrows(AssertionError.class, () -> assertIngestDocument(document2, document1)); } diff --git a/test/framework/src/test/java/org/elasticsearch/test/AbstractQueryTestCaseTests.java b/test/framework/src/test/java/org/elasticsearch/test/AbstractQueryTestCaseTests.java index 0f4fbc571c128..dd8b4dbc67164 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/AbstractQueryTestCaseTests.java +++ 
b/test/framework/src/test/java/org/elasticsearch/test/AbstractQueryTestCaseTests.java @@ -30,7 +30,7 @@ import java.util.stream.Collectors; import static java.util.Collections.singleton; -import static org.elasticsearch.test.AbstractQueryTestCase.alterateQueries; +import static org.elasticsearch.test.AbstractQueryTestCase.alternateQueries; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.hasEntry; import static org.hamcrest.Matchers.notNullValue; @@ -40,17 +40,17 @@ */ public class AbstractQueryTestCaseTests extends ESTestCase { - public void testAlterateQueries() throws IOException { - List> alterations = alterateQueries(singleton("{\"field\": \"value\"}"), null); + public void testAlternateQueries() throws IOException { + List> alterations = alternateQueries(singleton("{\"field\": \"value\"}"), null); assertAlterations(alterations, allOf(notNullValue(), hasEntry("{\"newField\":{\"field\":\"value\"}}", true))); - alterations = alterateQueries(singleton("{\"term\":{\"field\": \"value\"}}"), null); + alterations = alternateQueries(singleton("{\"term\":{\"field\": \"value\"}}"), null); assertAlterations(alterations, allOf( hasEntry("{\"newField\":{\"term\":{\"field\":\"value\"}}}", true), hasEntry("{\"term\":{\"newField\":{\"field\":\"value\"}}}", true)) ); - alterations = alterateQueries(singleton("{\"bool\":{\"must\": [{\"match\":{\"field\":\"value\"}}]}}"), null); + alterations = alternateQueries(singleton("{\"bool\":{\"must\": [{\"match\":{\"field\":\"value\"}}]}}"), null); assertAlterations(alterations, allOf( hasEntry("{\"newField\":{\"bool\":{\"must\":[{\"match\":{\"field\":\"value\"}}]}}}", true), hasEntry("{\"bool\":{\"newField\":{\"must\":[{\"match\":{\"field\":\"value\"}}]}}}", true), @@ -58,7 +58,7 @@ public void testAlterateQueries() throws IOException { hasEntry("{\"bool\":{\"must\":[{\"match\":{\"newField\":{\"field\":\"value\"}}}]}}", true) )); - alterations = alterateQueries(singleton("{\"function_score\":" + + 
alterations = alternateQueries(singleton("{\"function_score\":" + "{\"query\": {\"term\":{\"foo\": \"bar\"}}, \"script_score\": {\"script\":\"a + 1\", \"params\": {\"a\":0}}}}"), null); assertAlterations(alterations, allOf( hasEntry("{\"newField\":{\"function_score\":{\"query\":{\"term\":{\"foo\":\"bar\"}},\"script_score\":{\"script\":\"a + " + @@ -76,14 +76,14 @@ public void testAlterateQueries() throws IOException { )); } - public void testAlterateQueriesWithArbitraryContent() throws IOException { + public void testAlternateQueriesWithArbitraryContent() throws IOException { Set arbitraryContentHolders = Sets.newHashSet("params", "doc"); Set queries = Sets.newHashSet( "{\"query\":{\"script\":\"test\",\"params\":{\"foo\":\"bar\"}}}", "{\"query\":{\"more_like_this\":{\"fields\":[\"a\",\"b\"],\"like\":{\"doc\":{\"c\":\"d\"}}}}}" ); - List> alterations = alterateQueries(queries, arbitraryContentHolders); + List> alterations = alternateQueries(queries, arbitraryContentHolders); assertAlterations(alterations, allOf( hasEntry("{\"newField\":{\"query\":{\"script\":\"test\",\"params\":{\"foo\":\"bar\"}}}}", true), hasEntry("{\"query\":{\"newField\":{\"script\":\"test\",\"params\":{\"foo\":\"bar\"}}}}", true), diff --git a/test/framework/src/test/java/org/elasticsearch/test/test/ESTestCaseTests.java b/test/framework/src/test/java/org/elasticsearch/test/test/ESTestCaseTests.java index 0705eb32fc294..e9baaf1caa1ed 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/test/ESTestCaseTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/test/ESTestCaseTests.java @@ -164,7 +164,7 @@ public void testRandomUniqueTotallyUnique() { assertThat(randomUnique(i::incrementAndGet, 100), hasSize(100)); } - public void testRandomUniqueNormalUsageAlwayMoreThanOne() { + public void testRandomUniqueNormalUsageAlwaysMoreThanOne() { assertThat(randomUnique(() -> randomAlphaOfLengthBetween(1, 20), 10), hasSize(greaterThan(0))); } diff --git 
a/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java b/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java index 74efa66637481..a24a6ff26f99e 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java @@ -79,7 +79,7 @@ private static Collection> mockPlugins() { return Arrays.asList(getTestTransportPlugin(), TestZenDiscovery.TestPlugin.class, MockHttpTransport.TestPlugin.class); } - public void testInitializiationIsConsistent() { + public void testInitializationIsConsistent() { long clusterSeed = randomLong(); boolean masterNodes = randomBoolean(); int minNumDataNodes = randomIntBetween(0, 9); @@ -102,7 +102,7 @@ public void testInitializiationIsConsistent() { } /** - * a set of settings that are expected to have different values betweem clusters, even they have been initialized with the same + * a set of settings that are expected to have different values between clusters, even they have been initialized with the same * base settings. */ static final Set clusterUniqueSettings = new HashSet<>(); diff --git a/test/framework/src/test/java/org/elasticsearch/test/test/SuiteScopeClusterIT.java b/test/framework/src/test/java/org/elasticsearch/test/test/SuiteScopeClusterIT.java index cd3c3b2032331..01417e8a6b816 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/test/SuiteScopeClusterIT.java +++ b/test/framework/src/test/java/org/elasticsearch/test/test/SuiteScopeClusterIT.java @@ -29,8 +29,8 @@ import static org.hamcrest.Matchers.equalTo; /** - * This test ensures that the cluster initializion for suite scope is not influencing - * the tests random sequence due to initializtion using the same random instance. 
+ * This test ensures that the cluster initialization for suite scope is not influencing + * the tests random sequence due to initialization using the same random instance. */ @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.SUITE) public class SuiteScopeClusterIT extends ESIntegTestCase { diff --git a/test/framework/src/test/java/org/elasticsearch/test/test/TestScopeClusterIT.java b/test/framework/src/test/java/org/elasticsearch/test/test/TestScopeClusterIT.java index 8ef383050184b..bae17e66d617e 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/test/TestScopeClusterIT.java +++ b/test/framework/src/test/java/org/elasticsearch/test/test/TestScopeClusterIT.java @@ -26,8 +26,8 @@ import static org.hamcrest.Matchers.equalTo; /** - * This test ensures that the cluster initializion for TEST scope is not influencing - * the tests random sequence due to initializtion using the same random instance. + * This test ensures that the cluster initialization for TEST scope is not influencing + * the tests random sequence due to initialization using the same random instance. */ @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST) public class TestScopeClusterIT extends ESIntegTestCase { diff --git a/x-pack/docs/en/security/auditing/overview.asciidoc b/x-pack/docs/en/security/auditing/overview.asciidoc index 8248bcb082479..2bd66190fdb27 100644 --- a/x-pack/docs/en/security/auditing/overview.asciidoc +++ b/x-pack/docs/en/security/auditing/overview.asciidoc @@ -23,7 +23,7 @@ The {es} {security-features} provide two ways to persist audit logs: index. The audit index can reside on the same cluster, or a separate cluster. By default, only the `logfile` output is used when enabling auditing, -implicitly outputing to both `_audit.log` and `_access.log`. +implicitly outputting to both `_audit.log` and `_access.log`. 
To facilitate browsing and analyzing the events, you can also enable indexing by setting `xpack.security.audit.outputs` in `elasticsearch.yml`: diff --git a/x-pack/docs/en/security/authentication/configuring-pki-realm.asciidoc b/x-pack/docs/en/security/authentication/configuring-pki-realm.asciidoc index 184fc76209339..0554be2f87286 100644 --- a/x-pack/docs/en/security/authentication/configuring-pki-realm.asciidoc +++ b/x-pack/docs/en/security/authentication/configuring-pki-realm.asciidoc @@ -159,7 +159,7 @@ user: <1> <1> The name of a role. <2> The distinguished name (DN) of a PKI user. -The disinguished name for a PKI user follows X.500 naming conventions which +The distinguished name for a PKI user follows X.500 naming conventions which place the most specific fields (like `cn` or `uid`) at the beginning of the name, and the most general fields (like `o` or `dc`) at the end of the name. Some tools, such as _openssl_, may print out the subject name in a different diff --git a/x-pack/docs/en/security/authentication/saml-guide.asciidoc b/x-pack/docs/en/security/authentication/saml-guide.asciidoc index 2c11050a74753..ed2f50870f4af 100644 --- a/x-pack/docs/en/security/authentication/saml-guide.asciidoc +++ b/x-pack/docs/en/security/authentication/saml-guide.asciidoc @@ -251,7 +251,7 @@ additional names that can be used: `NameID` elements are an optional, but frequently provided, field within a SAML Assertion that the IdP may use to identify the Subject of that Assertion. In some cases the `NameID` will relate to the user's login - identifier (username) wihin the IdP, but in many cases they will be + identifier (username) within the IdP, but in many cases they will be internally generated identifiers that have no obvious meaning outside of the IdP. @@ -531,7 +531,7 @@ The path to the PEM formatted certificate file. e.g. `saml/saml-sign.crt` The path to the PEM formatted key file. e.g. 
`saml/saml-sign.key` `signing.secure_key_passphrase`:: -The passphrase for the key, if the file is encypted. This is a +The passphrase for the key, if the file is encrypted. This is a {ref}/secure-settings.html[secure setting] that must be set with the `elasticsearch-keystore` tool. @@ -545,7 +545,7 @@ The path to the PKCS#12 or JKS keystore. e.g. `saml/saml-sign.p12` The alias of the key within the keystore. e.g. `signing-key` `signing.keystore.secure_password`:: -The passphrase for the keystore, if the file is encypted. This is a +The passphrase for the keystore, if the file is encrypted. This is a {ref}/secure-settings.html[secure setting] that must be set with the `elasticsearch-keystore` tool. @@ -582,7 +582,7 @@ The path to the PEM formatted certificate file. e.g. `saml/saml-crypt.crt` The path to the PEM formatted key file. e.g. `saml/saml-crypt.key` `encryption.secure_key_passphrase`:: -The passphrase for the key, if the file is encypted. This is a +The passphrase for the key, if the file is encrypted. This is a {ref}/secure-settings.html[secure setting] that must be set with the `elasticsearch-keystore` tool. @@ -596,7 +596,7 @@ The path to the PKCS#12 or JKS keystore. e.g. `saml/saml-crypt.p12` The alias of the key within the keystore. e.g. `encryption-key` `encryption.keystore.secure_password`:: -The passphrase for the keystore, if the file is encypted. This is a +The passphrase for the keystore, if the file is encrypted. This is a {ref}/secure-settings.html[secure setting] that must be set with the `elasticsearch-keystore` tool. @@ -728,7 +728,7 @@ the certificates that {es} has been configured to use. SAML authentication in {kib} is also subject to the `xpack.security.sessionTimeout` setting that is described in the {kib} security -documentation, and you may wish to adjst this timeout to meet your local needs. +documentation, and you may wish to adjust this timeout to meet your local needs. 
The two additional settings that are required for SAML support are shown below: diff --git a/x-pack/docs/en/security/ccs-clients-integrations/http.asciidoc b/x-pack/docs/en/security/ccs-clients-integrations/http.asciidoc index 6b7ff26cbf3eb..ca22ceeebbe22 100644 --- a/x-pack/docs/en/security/ccs-clients-integrations/http.asciidoc +++ b/x-pack/docs/en/security/ccs-clients-integrations/http.asciidoc @@ -56,7 +56,7 @@ http://elasticsearch-py.readthedocs.org/en/master/#ssl-and-authentication[Python https://metacpan.org/pod/Search::Elasticsearch::Cxn::HTTPTiny#CONFIGURATION[Perl], http://www.elastic.co/guide/en/elasticsearch/client/php-api/current/_security.html[PHP], http://nest.azurewebsites.net/elasticsearch-net/security.html[.NET], -http://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/auth-reference.html[Javascript] +http://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/auth-reference.html[JavaScript] //// Groovy - TODO link diff --git a/x-pack/docs/en/watcher/actions.asciidoc b/x-pack/docs/en/watcher/actions.asciidoc index 7e527530f4291..67844290cd051 100644 --- a/x-pack/docs/en/watcher/actions.asciidoc +++ b/x-pack/docs/en/watcher/actions.asciidoc @@ -198,7 +198,7 @@ image::images/action-throttling.jpg[align="center"] When a watch is triggered, its condition determines whether or not to execute the watch actions. Within each action, you can also add a condition per action. These additional conditions enable a single alert to execute different actions depending -on a their respective conditions. The following watch would alway send an email, when +on a their respective conditions. The following watch would always send an email, when hits are found from the input search, but only trigger the `notify_pager` action when there are more than 5 hits in the search result. 
diff --git a/x-pack/docs/en/watcher/customizing-watches.asciidoc b/x-pack/docs/en/watcher/customizing-watches.asciidoc index ea44b8aa231b7..27aa2baef04b7 100644 --- a/x-pack/docs/en/watcher/customizing-watches.asciidoc +++ b/x-pack/docs/en/watcher/customizing-watches.asciidoc @@ -49,7 +49,7 @@ initial payload. A <> input contains a `request` object that specifies the indices you want to search, the {ref}/search-request-search-type.html[search type], and the search request body. The `body` field of a search input is the same as -the body of an Elasticsearch `_search` request, making the full Elaticsearch +the body of an Elasticsearch `_search` request, making the full Elasticsearch Query DSL available for you to use. For example, the following `search` input loads the latest VIX quote: diff --git a/x-pack/docs/en/watcher/release-notes.asciidoc b/x-pack/docs/en/watcher/release-notes.asciidoc index 627c45829d3e2..5875458a15433 100644 --- a/x-pack/docs/en/watcher/release-notes.asciidoc +++ b/x-pack/docs/en/watcher/release-notes.asciidoc @@ -121,7 +121,7 @@ March 30, 2016 .New Features * Added <> * Added support for adding <> - via HTTP requests and superceding and deprecating the usage of `attach_data` + via HTTP requests and superseding and deprecating the usage of `attach_data` in order to use this feature [float] @@ -143,7 +143,7 @@ February 2, 2016 February 2, 2016 .Enhancements -* Adds support for Elasticssearch 2.1.2 +* Adds support for Elasticsearch 2.1.2 [float] ==== 2.1.1 diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java index 0a7900d004b7b..bca7b747e6c24 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java @@ -387,7 +387,7 @@ private void 
autoFollowIndices(final AutoFollowMetadata autoFollowMetadata, cleanFollowedRemoteIndices(remoteClusterState, patterns); } - private void checkAutoFollowPattern(String autoFollowPattenName, + private void checkAutoFollowPattern(String autoFollowPatternName, String remoteCluster, AutoFollowPattern autoFollowPattern, List leaderIndicesToFollow, @@ -409,9 +409,9 @@ private void checkAutoFollowPattern(String autoFollowPattenName, .collect(Collectors.toList()); if (otherMatchingPatterns.size() != 0) { results.set(slot, new Tuple<>(indexToFollow, new ElasticsearchException("index to follow [" + indexToFollow.getName() + - "] for pattern [" + autoFollowPattenName + "] matches with other patterns " + otherMatchingPatterns + ""))); + "] for pattern [" + autoFollowPatternName + "] matches with other patterns " + otherMatchingPatterns + ""))); if (leaderIndicesCountDown.countDown()) { - resultHandler.accept(new AutoFollowResult(autoFollowPattenName, results.asList())); + resultHandler.accept(new AutoFollowResult(autoFollowPatternName, results.asList())); } } else { final Settings leaderIndexSettings = remoteMetadata.getIndexSafe(indexToFollow).getSettings(); @@ -421,31 +421,31 @@ private void checkAutoFollowPattern(String autoFollowPattenName, String message = String.format(Locale.ROOT, "index [%s] cannot be followed, because soft deletes are not enabled", indexToFollow.getName()); LOGGER.warn(message); - updateAutoFollowMetadata(recordLeaderIndexAsFollowFunction(autoFollowPattenName, indexToFollow), error -> { + updateAutoFollowMetadata(recordLeaderIndexAsFollowFunction(autoFollowPatternName, indexToFollow), error -> { ElasticsearchException failure = new ElasticsearchException(message); if (error != null) { failure.addSuppressed(error); } results.set(slot, new Tuple<>(indexToFollow, failure)); if (leaderIndicesCountDown.countDown()) { - resultHandler.accept(new AutoFollowResult(autoFollowPattenName, results.asList())); + resultHandler.accept(new 
AutoFollowResult(autoFollowPatternName, results.asList())); } }); continue; } else if (leaderIndexAlreadyFollowed(autoFollowPattern, indexToFollow, localMetadata)) { - updateAutoFollowMetadata(recordLeaderIndexAsFollowFunction(autoFollowPattenName, indexToFollow), error -> { + updateAutoFollowMetadata(recordLeaderIndexAsFollowFunction(autoFollowPatternName, indexToFollow), error -> { results.set(slot, new Tuple<>(indexToFollow, error)); if (leaderIndicesCountDown.countDown()) { - resultHandler.accept(new AutoFollowResult(autoFollowPattenName, results.asList())); + resultHandler.accept(new AutoFollowResult(autoFollowPatternName, results.asList())); } }); continue; } - followLeaderIndex(autoFollowPattenName, remoteCluster, indexToFollow, autoFollowPattern, headers, error -> { + followLeaderIndex(autoFollowPatternName, remoteCluster, indexToFollow, autoFollowPattern, headers, error -> { results.set(slot, new Tuple<>(indexToFollow, error)); if (leaderIndicesCountDown.countDown()) { - resultHandler.accept(new AutoFollowResult(autoFollowPattenName, results.asList())); + resultHandler.accept(new AutoFollowResult(autoFollowPatternName, results.asList())); } }); } @@ -471,7 +471,7 @@ private static boolean leaderIndexAlreadyFollowed(AutoFollowPattern autoFollowPa return false; } - private void followLeaderIndex(String autoFollowPattenName, + private void followLeaderIndex(String autoFollowPatternName, String remoteCluster, Index indexToFollow, AutoFollowPattern pattern, @@ -504,7 +504,7 @@ private void followLeaderIndex(String autoFollowPattenName, // This function updates the auto follow metadata in the cluster to record that the leader index has been followed: // (so that we do not try to follow it in subsequent auto follow runs) - Function function = recordLeaderIndexAsFollowFunction(autoFollowPattenName, indexToFollow); + Function function = recordLeaderIndexAsFollowFunction(autoFollowPatternName, indexToFollow); // The coordinator always runs on the elected master 
node, so we can update cluster state here: updateAutoFollowMetadata(function, onResult); }; diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportFollowStatsAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportFollowStatsAction.java index 8ab66aec8e80b..439038b222bf5 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportFollowStatsAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportFollowStatsAction.java @@ -71,10 +71,10 @@ protected void doExecute( @Override protected FollowStatsAction.StatsResponses newResponse( final FollowStatsAction.StatsRequest request, - final List statsRespons, + final List statsResponse, final List taskOperationFailures, final List failedNodeExceptions) { - return new FollowStatsAction.StatsResponses(taskOperationFailures, failedNodeExceptions, statsRespons); + return new FollowStatsAction.StatsResponses(taskOperationFailures, failedNodeExceptions, statsResponse); } @Override diff --git a/x-pack/plugin/core/src/main/config/log4j2.properties b/x-pack/plugin/core/src/main/config/log4j2.properties index 21b0732fed418..4634ef7fb7e30 100644 --- a/x-pack/plugin/core/src/main/config/log4j2.properties +++ b/x-pack/plugin/core/src/main/config/log4j2.properties @@ -38,7 +38,7 @@ appender.audit_rolling.layout.pattern = {\ # "host.name" unresolved hostname of the local node # "host.ip" the local bound ip (i.e. the ip listening for connections) # "event.type" a received REST request is translated into one or more transport requests. This indicates which processing layer generated the event "rest" or "transport" (internal) -# "event.action" the name of the audited event, eg. "authentication_failed", "access_granted", "run_as_granted", etc. +# "event.action" the name of the audited event, e.g. "authentication_failed", "access_granted", "run_as_granted", etc. 
# "user.name" the subject name as authenticated by a realm # "user.run_by.name" the original authenticated subject name that is impersonating another one. # "user.run_as.name" if this "event.action" is of a run_as type, this is the subject name to be impersonated as. diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/index/engine/FrozenEngine.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/index/engine/FrozenEngine.java index 513400cb72c3c..044dfa8899c74 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/index/engine/FrozenEngine.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/index/engine/FrozenEngine.java @@ -198,7 +198,7 @@ private synchronized DirectoryReader getReader() throws IOException { @SuppressForbidden( reason = "we manage references explicitly here") public Searcher acquireSearcher(String source, SearcherScope scope) throws EngineException { store.incRef(); - boolean releaseRefeference = true; + boolean releaseReference = true; try { final boolean maybeOpenReader; switch (source) { @@ -235,10 +235,10 @@ public Searcher acquireSearcher(String source, SearcherScope scope) throws Engin LazyDirectoryReader lazyDirectoryReader = new LazyDirectoryReader(reader, this); Searcher newSearcher = new Searcher(source, new IndexSearcher(lazyDirectoryReader), () -> IOUtils.close(lazyDirectoryReader, store::decRef)); - releaseRefeference = false; + releaseReference = false; return newSearcher; } finally { - if (releaseRefeference) { + if (releaseReference) { reader.decRef(); // don't call close here we manage reference ourselves } } @@ -246,7 +246,7 @@ public Searcher acquireSearcher(String source, SearcherScope scope) throws Engin } catch (IOException e) { throw new UncheckedIOException(e); } finally { - if (releaseRefeference) { + if (releaseReference) { store.decRef(); } } @@ -264,7 +264,7 @@ static LazyDirectoryReader unwrapLazyReader(DirectoryReader reader) { /* * We register this listener for a frozen index that 
will - * 1. reset the reader every time the search context is validated which happens when the context is looked up ie. on a fetch phase + * 1. reset the reader every time the search context is validated which happens when the context is looked up i.e. on a fetch phase * etc. * 2. register a releasable resource that is cleaned after each phase that releases the reader for this searcher */ @@ -337,7 +337,7 @@ synchronized void release() throws IOException { delegate = null; if (tryIncRef()) { // only do this if we are not closed already // we end up in this case when we are not closed but in an intermediate - // state were we want to release all or the real leaf readers ie. in between search phases + // state were we want to release all or the real leaf readers i.e. in between search phases // but still want to keep this Lazy reference open. In oder to let the heavy real leaf // readers to be GCed we need to null our the references. try { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/index/engine/RewriteCachingDirectoryReader.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/index/engine/RewriteCachingDirectoryReader.java index 32ad58b88b5d0..431a6a6022f2d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/index/engine/RewriteCachingDirectoryReader.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/index/engine/RewriteCachingDirectoryReader.java @@ -32,7 +32,7 @@ /** * This special DirectoryReader is used to handle can_match requests against frozen indices. - * It' caches all relevant point value data for every point value field ie. min/max packed values etc. + * It' caches all relevant point value data for every point value field i.e. min/max packed values etc. * to hold enough information to rewrite a date range query and make a decisions if an index can match or not. * This allows frozen indices to be searched with wildcards in a very efficient way without opening a reader on them. 
*/ diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/License.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/License.java index b2130ac9f4b81..3abb6771e2efa 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/License.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/License.java @@ -769,7 +769,7 @@ public Builder validate() { } /** - * Returns true iff the license is a production licnese + * Returns true iff the license is a production license */ public boolean isProductionLicense() { switch (operationMode()) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseService.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseService.java index dbf11026f4709..e2d8bf25958f0 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseService.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseService.java @@ -516,7 +516,7 @@ static SchedulerEngine.Schedule nextLicenseCheck(License license) { // when we encounter a license with a future issue date // which can happen with autogenerated license, // we want to schedule a notification on the license issue date - // so the license is notificed once it is valid + // so the license is notified once it is valid // see https://github.com/elastic/x-plugins/issues/983 return license.issueDate(); } else if (time < license.expiryDate()) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/OperationModeFileWatcher.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/OperationModeFileWatcher.java index b8e6446b9f49f..b6e6d60aa414f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/OperationModeFileWatcher.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/OperationModeFileWatcher.java @@ -115,7 +115,7 @@ private synchronized void onChange(Path file) { } } } finally { - // set this after the fact 
to prevent that we are jumping back and forth first setting to defautl and then reading the + // set this after the fact to prevent that we are jumping back and forth first setting to default and then reading the // actual op mode resetting it. this.currentOperationMode = newOperationMode; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/common/ProtocolUtils.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/common/ProtocolUtils.java index 3934095512120..55faadee9aaf4 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/common/ProtocolUtils.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/common/ProtocolUtils.java @@ -16,7 +16,7 @@ public final class ProtocolUtils { /** * Implements equals for a map of string arrays * - * The map of string arrays is used in some XPack protocol classes but does't work with equal. + * The map of string arrays is used in some XPack protocol classes but doesn't work with equal. */ public static boolean equals(Map a, Map b) { if (a == null) { @@ -47,7 +47,7 @@ public static boolean equals(Map a, Map b) { /** * Implements hashCode for map of string arrays * - * The map of string arrays does't work with hashCode. + * The map of string arrays doesn't work with hashCode. */ public static int hashCode(Map a) { int hash = 0; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreRequest.java index 196982c0a35fb..aedeb513b0184 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreRequest.java @@ -283,7 +283,7 @@ public boolean returnDetailedInfo() { /** * Add a stage in the graph exploration. 
Each hop represents a stage of - * querying elasticsearch to identify terms which can then be connnected to + * querying elasticsearch to identify terms which can then be connected to * other terms in a subsequent hop. * * @param guidingQuery diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreResponse.java index b44e192f407ac..cd82f70a79592 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreResponse.java @@ -202,7 +202,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - "GraphExploreResponsenParser", true, + "GraphExploreResponseParser", true, args -> { GraphExploreResponse result = new GraphExploreResponse(); result.vertices = new HashMap<>(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/SourceOnlySnapshotRepository.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/SourceOnlySnapshotRepository.java index 18e96619ec822..deb8c6c4388a1 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/SourceOnlySnapshotRepository.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/SourceOnlySnapshotRepository.java @@ -80,7 +80,7 @@ public void initializeSnapshot(SnapshotId snapshotId, List indices, Met IndexMetaData index = metaData.index(indexId.getName()); IndexMetaData.Builder indexMetadataBuilder = IndexMetaData.builder(index); // for a minimal restore we basically disable indexing on all fields and only create an index - // that is valid from an operational perspective. ie. it will have all metadata fields like version/ + // that is valid from an operational perspective. i.e. 
it will have all metadata fields like version/ // seqID etc. and an indexed ID field such that we can potentially perform updates on them or delete documents. ImmutableOpenMap mappings = index.getMappings(); Iterator> iterator = mappings.iterator(); @@ -108,7 +108,7 @@ public void snapshotShard(IndexShard shard, Store store, SnapshotId snapshotId, IndexShardSnapshotStatus snapshotStatus) { if (shard.mapperService().documentMapper() != null // if there is no mapping this is null && shard.mapperService().documentMapper().sourceMapper().isComplete() == false) { - throw new IllegalStateException("Can't snapshot _source only on an index that has incomplete source ie. has _source disabled " + + throw new IllegalStateException("Can't snapshot _source only on an index that has incomplete source i.e. has _source disabled " + "or filters the source"); } ShardPath shardPath = shard.shardPath(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/GraphExploreRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/GraphExploreRequestBuilder.java index 37456f234648a..812a28e0ce090 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/GraphExploreRequestBuilder.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/GraphExploreRequestBuilder.java @@ -116,7 +116,7 @@ public GraphExploreRequestBuilder setTypes(String... types) { /** * Add a stage in the graph exploration. Each hop represents a stage of - * querying elasticsearch to identify terms which can then be connnected + * querying elasticsearch to identify terms which can then be connected * to other terms in a subsequent hop. 
* @param guidingQuery optional choice of query which influences which documents * are considered in this stage diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/IndexerJobStats.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/IndexerJobStats.java index 2ef9d242d9ef4..022e0668cf766 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/IndexerJobStats.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/IndexerJobStats.java @@ -26,7 +26,7 @@ public abstract class IndexerJobStats implements ToXContentObject, Writeable { protected long numPages = 0; protected long numInputDocuments = 0; - protected long numOuputDocuments = 0; + protected long numOutputDocuments = 0; protected long numInvocations = 0; protected long indexTime = 0; protected long searchTime = 0; @@ -41,12 +41,12 @@ public abstract class IndexerJobStats implements ToXContentObject, Writeable { public IndexerJobStats() { } - public IndexerJobStats(long numPages, long numInputDocuments, long numOuputDocuments, long numInvocations, + public IndexerJobStats(long numPages, long numInputDocuments, long numOutputDocuments, long numInvocations, long indexTime, long searchTime, long indexTotal, long searchTotal, long indexFailures, long searchFailures) { this.numPages = numPages; this.numInputDocuments = numInputDocuments; - this.numOuputDocuments = numOuputDocuments; + this.numOutputDocuments = numOutputDocuments; this.numInvocations = numInvocations; this.indexTime = indexTime; this.searchTime = searchTime; @@ -59,7 +59,7 @@ public IndexerJobStats(long numPages, long numInputDocuments, long numOuputDocum public IndexerJobStats(StreamInput in) throws IOException { this.numPages = in.readVLong(); this.numInputDocuments = in.readVLong(); - this.numOuputDocuments = in.readVLong(); + this.numOutputDocuments = in.readVLong(); this.numInvocations = in.readVLong(); if 
(in.getVersion().onOrAfter(Version.V_6_6_0)) { this.indexTime = in.readVLong(); @@ -84,7 +84,7 @@ public long getNumInvocations() { } public long getOutputDocuments() { - return numOuputDocuments; + return numOutputDocuments; } public long getIndexFailures() { @@ -128,7 +128,7 @@ public void incrementNumInvocations(long n) { public void incrementNumOutputDocuments(long n) { assert(n >= 0); - numOuputDocuments += n; + numOutputDocuments += n; } public void incrementIndexingFailures() { @@ -161,7 +161,7 @@ public void markEndSearch() { public void writeTo(StreamOutput out) throws IOException { out.writeVLong(numPages); out.writeVLong(numInputDocuments); - out.writeVLong(numOuputDocuments); + out.writeVLong(numOutputDocuments); out.writeVLong(numInvocations); if (out.getVersion().onOrAfter(Version.V_6_6_0)) { out.writeVLong(indexTime); @@ -187,7 +187,7 @@ public boolean equals(Object other) { return Objects.equals(this.numPages, that.numPages) && Objects.equals(this.numInputDocuments, that.numInputDocuments) - && Objects.equals(this.numOuputDocuments, that.numOuputDocuments) + && Objects.equals(this.numOutputDocuments, that.numOutputDocuments) && Objects.equals(this.numInvocations, that.numInvocations) && Objects.equals(this.indexTime, that.indexTime) && Objects.equals(this.searchTime, that.searchTime) @@ -199,7 +199,7 @@ public boolean equals(Object other) { @Override public int hashCode() { - return Objects.hash(numPages, numInputDocuments, numOuputDocuments, numInvocations, + return Objects.hash(numPages, numInputDocuments, numOutputDocuments, numInvocations, indexTime, searchTime, indexFailures, searchFailures, indexTotal, searchTotal); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/AsyncWaitStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/AsyncWaitStep.java index d67a20b758018..fe0e926efbd7e 100644 --- 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/AsyncWaitStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/AsyncWaitStep.java @@ -32,7 +32,7 @@ protected Client getClient() { public interface Listener { - void onResponse(boolean conditionMet, ToXContentObject infomationContext); + void onResponse(boolean conditionMet, ToXContentObject informationContext); void onFailure(Exception e); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ClusterStateWaitStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ClusterStateWaitStep.java index d8fb5d651852f..b46c3a365ffa8 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ClusterStateWaitStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ClusterStateWaitStep.java @@ -24,19 +24,19 @@ public ClusterStateWaitStep(StepKey key, StepKey nextStepKey) { public static class Result { private final boolean complete; - private final ToXContentObject infomationContext; + private final ToXContentObject informationContext; - public Result(boolean complete, ToXContentObject infomationContext) { + public Result(boolean complete, ToXContentObject informationContext) { this.complete = complete; - this.infomationContext = infomationContext; + this.informationContext = informationContext; } public boolean isComplete() { return complete; } - public ToXContentObject getInfomationContext() { - return infomationContext; + public ToXContentObject getInformationContext() { + return informationContext; } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlTasks.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlTasks.java index e78649d152296..7b6e15af4394f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlTasks.java +++ 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlTasks.java @@ -74,7 +74,7 @@ public static DatafeedState getDatafeedState(String datafeedId, @Nullable Persis return (DatafeedState) task.getState(); } else { // If we haven't started a datafeed then there will be no persistent task, - // which is the same as if the datafeed was't started + // which is the same as if the datafeed wasn't started return DatafeedState.STOPPED; } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Job.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Job.java index 314a533914a6a..489c47807772e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Job.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Job.java @@ -410,7 +410,7 @@ public Collection allInputFields() { // remove empty strings allFields.remove(""); - // the categorisation field isn't an input field + // the categorization field isn't an input field allFields.remove(AnalysisConfig.ML_CATEGORY_FIELD); return allFields; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdate.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdate.java index a0519697e5909..68a6058bfb9cc 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdate.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdate.java @@ -83,7 +83,7 @@ private JobUpdate(String jobId, @Nullable List groups, @Nullable String @Nullable List detectorUpdates, @Nullable ModelPlotConfig modelPlotConfig, @Nullable AnalysisLimits analysisLimits, @Nullable TimeValue backgroundPersistInterval, @Nullable Long renormalizationWindowDays, @Nullable Long resultsRetentionDays, - @Nullable Long modelSnapshotRetentionDays, @Nullable List categorisationFilters, + @Nullable Long 
modelSnapshotRetentionDays, @Nullable List categorizationFilters, @Nullable Map customSettings, @Nullable String modelSnapshotId, @Nullable Version modelSnapshotMinVersion, @Nullable Version jobVersion, @Nullable Boolean clearJobFinishTime) { this.jobId = jobId; @@ -96,7 +96,7 @@ private JobUpdate(String jobId, @Nullable List groups, @Nullable String this.backgroundPersistInterval = backgroundPersistInterval; this.modelSnapshotRetentionDays = modelSnapshotRetentionDays; this.resultsRetentionDays = resultsRetentionDays; - this.categorizationFilters = categorisationFilters; + this.categorizationFilters = categorizationFilters; this.customSettings = customSettings; this.modelSnapshotId = modelSnapshotId; this.modelSnapshotMinVersion = modelSnapshotMinVersion; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/DataCounts.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/DataCounts.java index b13e702e85de3..4d72849c7ecdb 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/DataCounts.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/DataCounts.java @@ -421,7 +421,7 @@ public void updateLatestRecordTimeStamp(Date latestRecordTimeStamp) { /** * The wall clock time the latest record was seen. 
* - * @return Wall clock time of the lastest record + * @return Wall clock time of the latest record */ public Date getLastDataTimeStamp() { return lastDataTimeStamp; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupIndexerJobStats.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupIndexerJobStats.java index f038228ae76a9..81851940a1f81 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupIndexerJobStats.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupIndexerJobStats.java @@ -54,10 +54,10 @@ public RollupIndexerJobStats() { super(); } - public RollupIndexerJobStats(long numPages, long numInputDocuments, long numOuputDocuments, long numInvocations, + public RollupIndexerJobStats(long numPages, long numInputDocuments, long numOutputDocuments, long numInvocations, long indexTime, long searchTime, long indexTotal, long searchTotal, long indexFailures, long searchFailures) { - super(numPages, numInputDocuments, numOuputDocuments, numInvocations, indexTime, searchTime, + super(numPages, numInputDocuments, numOutputDocuments, numInvocations, indexTime, searchTime, indexTotal, searchTotal, indexFailures, searchFailures); } @@ -70,7 +70,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.startObject(); builder.field(NUM_PAGES.getPreferredName(), numPages); builder.field(NUM_INPUT_DOCUMENTS.getPreferredName(), numInputDocuments); - builder.field(NUM_OUTPUT_DOCUMENTS.getPreferredName(), numOuputDocuments); + builder.field(NUM_OUTPUT_DOCUMENTS.getPreferredName(), numOutputDocuments); builder.field(NUM_INVOCATIONS.getPreferredName(), numInvocations); builder.field(INDEX_TIME_IN_MS.getPreferredName(), indexTime); builder.field(INDEX_TOTAL.getPreferredName(), indexTotal); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/scheduler/Cron.java 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/scheduler/Cron.java index d4ccc22d32ab4..8d7f8b7c347c3 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/scheduler/Cron.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/scheduler/Cron.java @@ -614,7 +614,7 @@ public long getNextValidTimeAfter(final long time) { cl.set(Calendar.MONTH, mon); // no '- 1' here because we are promoting the month continue; - } else if (daysToAdd > 0) { // are we swithing days? + } else if (daysToAdd > 0) { // are we switching days? cl.set(Calendar.SECOND, 0); cl.set(Calendar.MINUTE, 0); cl.set(Calendar.HOUR_OF_DAY, 0); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/BCrypt.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/BCrypt.java index a93476bbdc8da..8f57b402a59d3 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/BCrypt.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/BCrypt.java @@ -421,7 +421,7 @@ private static String encode_base64(byte d[], int len) /** * Look up the 3 bits base64-encoded by the specified character, - * range-checking againt conversion table + * range-checking against conversion table * @param x the base64-encoded value * @return the decoded value of x */ @@ -511,7 +511,7 @@ private void encipher(int lr[], int off) { } /** - * Cycically extract a word of key material + * Cyclically extract a word of key material * @param data the string to extract the data from * @param offp a "pointer" (as a one-entry array) to the * current offset into data diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReader.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReader.java index 6d3864aa3eba2..3cf0ff29c6874 
100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReader.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReader.java @@ -151,7 +151,7 @@ public Fields getTermVectors(int docID) throws IOException { return null; } f = new FieldFilterFields(f); - // we need to check for emptyness, so we can return null: + // we need to check for emptiness, so we can return null: return f.iterator().hasNext() ? f : null; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/FieldPermissions.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/FieldPermissions.java index 7e45b893fed6b..d0a12e22ca8fb 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/FieldPermissions.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/FieldPermissions.java @@ -165,7 +165,7 @@ public FieldPermissionsDefinition getFieldPermissionsDefinition() { return fieldPermissionsDefinition; } - /** Return whether field-level security is enabled, ie. whether any field might be filtered out. */ + /** Return whether field-level security is enabled, i.e. whether any field might be filtered out. 
*/ public boolean hasFieldLevelSecurity() { return permittedFieldsAutomatonIsTotal == false; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/client/SecurityClient.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/client/SecurityClient.java index a7faf4d223108..51ebaeee58ba4 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/client/SecurityClient.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/client/SecurityClient.java @@ -135,7 +135,7 @@ public ActionFuture clearRealmCache(ClearRealmCacheRequ ****************/ /** - * Clears the roles cache. This API only works for the naitve roles that are stored in an elasticsearch index. It is + * Clears the roles cache. This API only works for the native roles that are stored in an elasticsearch index. It is * possible to clear the cache of all roles or to specify the names of individual roles that should have their cache * cleared. */ @@ -144,7 +144,7 @@ public ClearRolesCacheRequestBuilder prepareClearRolesCache() { } /** - * Clears the roles cache. This API only works for the naitve roles that are stored in an elasticsearch index. It is + * Clears the roles cache. This API only works for the native roles that are stored in an elasticsearch index. It is * possible to clear the cache of all roles or to specify the names of individual roles that should have their cache * cleared. */ @@ -153,7 +153,7 @@ public void clearRolesCache(ClearRolesCacheRequest request, ActionListener filesToMonitor(@Nullable Environment environment); /** - * {@inheritDoc}. Declared as abstract to force implementors to provide a custom implementation + * {@inheritDoc}. Declared as abstract to force implementers to provide a custom implementation */ public abstract String toString(); /** - * {@inheritDoc}. Declared as abstract to force implementors to provide a custom implementation + * {@inheritDoc}. 
Declared as abstract to force implementers to provide a custom implementation */ public abstract boolean equals(Object o); /** - * {@inheritDoc}. Declared as abstract to force implementors to provide a custom implementation + * {@inheritDoc}. Declared as abstract to force implementers to provide a custom implementation */ public abstract int hashCode(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/actions/Action.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/actions/Action.java index 3fe07dddacbca..b8ca2d20c6db4 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/actions/Action.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/actions/Action.java @@ -79,7 +79,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } /** - * {@code Failure} is a {@link StoppedResult} with a status of {@link Status#FAILURE} for actiosn that have failed unexpectedly + * {@code Failure} is a {@link StoppedResult} with a status of {@link Status#FAILURE} for actions that have failed unexpectedly * (e.g., an exception was thrown in a place that wouldn't expect one, like transformation or an HTTP request). */ public static class Failure extends StoppedResult { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/actions/throttler/PeriodThrottler.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/actions/throttler/PeriodThrottler.java index 34f4af70b8354..c70b154dd3c05 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/actions/throttler/PeriodThrottler.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/actions/throttler/PeriodThrottler.java @@ -16,7 +16,7 @@ /** * This throttler throttles the action based on its last successful execution time. 
If the time passed since - * the last successful execution is lower than the given period, the aciton will be throttled. + * the last successful execution is lower than the given period, the action will be throttled. */ public class PeriodThrottler implements Throttler { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/crypto/CryptoService.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/crypto/CryptoService.java index 8bd999ebfd235..e4089a12eabb2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/crypto/CryptoService.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/crypto/CryptoService.java @@ -50,7 +50,7 @@ public class CryptoService { // also provides authentication of the encrypted data, which is something that we are // missing here. private static final String DEFAULT_ENCRYPTION_ALGORITHM = "AES/CTR/NoPadding"; - private static final String DEFAULT_KEY_ALGORITH = "AES"; + private static final String DEFAULT_KEY_ALGORITHM = "AES"; private static final int DEFAULT_KEY_LENGTH = 128; private static final Setting ENCRYPTION_ALGO_SETTING = @@ -58,7 +58,7 @@ public class CryptoService { private static final Setting ENCRYPTION_KEY_LENGTH_SETTING = Setting.intSetting(SecurityField.setting("encryption_key.length"), DEFAULT_KEY_LENGTH, Property.NodeScope); private static final Setting ENCRYPTION_KEY_ALGO_SETTING = - new Setting<>(SecurityField.setting("encryption_key.algorithm"), DEFAULT_KEY_ALGORITH, s -> s, Property.NodeScope); + new Setting<>(SecurityField.setting("encryption_key.algorithm"), DEFAULT_KEY_ALGORITHM, s -> s, Property.NodeScope); private static final Logger logger = LogManager.getLogger(CryptoService.class); private final SecureRandom secureRandom = new SecureRandom(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/execution/ActionExecutionMode.java 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/execution/ActionExecutionMode.java index 9f8f623f487ef..511b40321cd0e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/execution/ActionExecutionMode.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/execution/ActionExecutionMode.java @@ -40,9 +40,9 @@ public enum ActionExecutionMode { private final boolean force; private final boolean simulate; - ActionExecutionMode(byte id, boolean froce, boolean simulate) { + ActionExecutionMode(byte id, boolean force, boolean simulate) { this.id = id; - this.force = froce; + this.force = force; this.simulate = simulate; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/execute/ExecuteWatchRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/execute/ExecuteWatchRequest.java index bece3e5a6f50a..dc2777a536cc2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/execute/ExecuteWatchRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/execute/ExecuteWatchRequest.java @@ -144,14 +144,14 @@ public void setRecordExecution(boolean recordExecution) { } /** - * @return The alertnative input to use (may be null) + * @return The alternative input to use (may be null) */ public Map getAlternativeInput() { return alternativeInput; } /** - * @param alternativeInput Set's the alernative input + * @param alternativeInput Set's the alternative input */ public void setAlternativeInput(Map alternativeInput) { this.alternativeInput = alternativeInput; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/execute/ExecuteWatchRequestBuilder.java 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/execute/ExecuteWatchRequestBuilder.java index e67424462418d..2e3be24cc5593 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/execute/ExecuteWatchRequestBuilder.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/execute/ExecuteWatchRequestBuilder.java @@ -54,7 +54,7 @@ public ExecuteWatchRequestBuilder setRecordExecution(boolean recordExecution) { } /** - * @param alternativeInput Set's the alernative input + * @param alternativeInput Set's the alternative input */ public ExecuteWatchRequestBuilder setAlternativeInput(Map alternativeInput) { request.setAlternativeInput(alternativeInput); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/watch/WatchStatus.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/watch/WatchStatus.java index 93e713bb8844e..57047b80fed6a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/watch/WatchStatus.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/watch/WatchStatus.java @@ -129,7 +129,7 @@ public int hashCode() { } /** - * Called whenever an watch is checked, ie. the condition of the watch is evaluated to see if + * Called whenever an watch is checked, i.e. the condition of the watch is evaluated to see if * the watch should be executed. * * @param metCondition indicates whether the watch's condition was met. @@ -151,7 +151,7 @@ public void onActionResult(String actionId, DateTime timestamp, Action.Result re } /** - * Notifies this status that the givne actions were acked. If the current state of one of these actions is + * Notifies this status that the given actions were acked. 
If the current state of one of these actions is * {@link ActionStatus.AckStatus.State#ACKABLE ACKABLE}, * then we'll it'll change to {@link ActionStatus.AckStatus.State#ACKED ACKED} * (when set to {@link ActionStatus.AckStatus.State#ACKED ACKED}, the AckThrottler diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/XPackLicenseStateTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/XPackLicenseStateTests.java index 76b735dc78a38..e320b0818c38c 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/XPackLicenseStateTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/XPackLicenseStateTests.java @@ -41,7 +41,7 @@ void assertAllowed(OperationMode mode, boolean active, Predicate mode != GOLD && mode != STANDARD); - assertAckMesssages(XPackField.SECURITY, BASIC, toMode, 0); + assertAckMessages(XPackField.SECURITY, BASIC, toMode, 0); } public void testSecurityAckAnyToTrialOrPlatinum() { - assertAckMesssages(XPackField.SECURITY, randomMode(), randomTrialOrPlatinumMode(), 0); + assertAckMessages(XPackField.SECURITY, randomMode(), randomTrialOrPlatinumMode(), 0); } public void testSecurityAckTrialStandardGoldOrPlatinumToBasic() { - assertAckMesssages(XPackField.SECURITY, randomTrialStandardGoldOrPlatinumMode(), BASIC, 3); + assertAckMessages(XPackField.SECURITY, randomTrialStandardGoldOrPlatinumMode(), BASIC, 3); } public void testSecurityAckAnyToStandard() { OperationMode from = randomFrom(BASIC, GOLD, PLATINUM, TRIAL); - assertAckMesssages(XPackField.SECURITY, from, STANDARD, 4); + assertAckMessages(XPackField.SECURITY, from, STANDARD, 4); } public void testSecurityAckBasicStandardTrialOrPlatinumToGold() { OperationMode from = randomFrom(BASIC, PLATINUM, TRIAL, STANDARD); - assertAckMesssages(XPackField.SECURITY, from, GOLD, 2); + assertAckMessages(XPackField.SECURITY, from, GOLD, 2); } public void testMonitoringAckBasicToAny() { - assertAckMesssages(XPackField.MONITORING, BASIC, randomMode(), 
0); + assertAckMessages(XPackField.MONITORING, BASIC, randomMode(), 0); } public void testMonitoringAckAnyToTrialGoldOrPlatinum() { - assertAckMesssages(XPackField.MONITORING, randomMode(), randomTrialStandardGoldOrPlatinumMode(), 0); + assertAckMessages(XPackField.MONITORING, randomMode(), randomTrialStandardGoldOrPlatinumMode(), 0); } public void testMonitoringAckNotBasicToBasic() { OperationMode from = randomFrom(STANDARD, GOLD, PLATINUM, TRIAL); - assertAckMesssages(XPackField.MONITORING, from, BASIC, 2); + assertAckMessages(XPackField.MONITORING, from, BASIC, 2); } public void testMonitoringAllowed() { @@ -447,11 +447,11 @@ public void testSqlPlatinumExpired() { } public void testSqlAckAnyToTrialOrPlatinum() { - assertAckMesssages(XPackField.SQL, randomMode(), randomTrialOrPlatinumMode(), 0); + assertAckMessages(XPackField.SQL, randomMode(), randomTrialOrPlatinumMode(), 0); } public void testSqlAckTrialOrPlatinumToNotTrialOrPlatinum() { - assertAckMesssages(XPackField.SQL, randomTrialOrPlatinumMode(), randomBasicStandardOrGold(), 1); + assertAckMessages(XPackField.SQL, randomTrialOrPlatinumMode(), randomBasicStandardOrGold(), 1); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotIT.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotIT.java index a025b07a8eb6d..708795118092d 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotIT.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotIT.java @@ -102,10 +102,10 @@ public void testSnapshotAndRestore() throws Exception { final String sourceIdx = "test-idx"; boolean requireRouting = randomBoolean(); boolean useNested = randomBoolean(); - IndexRequestBuilder[] builders = snashotAndRestore(sourceIdx, 1, true, requireRouting, useNested); + IndexRequestBuilder[] builders = snapshotAndRestore(sourceIdx, 1, true, requireRouting, useNested); IndicesStatsResponse 
indicesStatsResponse = client().admin().indices().prepareStats(sourceIdx).clear().setDocs(true).get(); long deleted = indicesStatsResponse.getTotal().docs.getDeleted(); - boolean sourceHadDeletions = deleted > 0; // we use indexRandom which might create holes ie. deleted docs + boolean sourceHadDeletions = deleted > 0; // we use indexRandom which might create holes i.e. deleted docs assertHits(sourceIdx, builders.length, sourceHadDeletions); assertMappings(sourceIdx, requireRouting, useNested); SearchPhaseExecutionException e = expectThrows(SearchPhaseExecutionException.class, () -> { @@ -132,7 +132,7 @@ public void testSnapshotAndRestore() throws Exception { public void testSnapshotAndRestoreWithNested() throws Exception { final String sourceIdx = "test-idx"; boolean requireRouting = randomBoolean(); - IndexRequestBuilder[] builders = snashotAndRestore(sourceIdx, 1, true, requireRouting, true); + IndexRequestBuilder[] builders = snapshotAndRestore(sourceIdx, 1, true, requireRouting, true); IndicesStatsResponse indicesStatsResponse = client().admin().indices().prepareStats().clear().setDocs(true).get(); assertThat(indicesStatsResponse.getTotal().docs.getDeleted(), Matchers.greaterThan(0L)); assertHits(sourceIdx, builders.length, true); @@ -218,7 +218,7 @@ private void assertHits(String index, int numDocsExpected, boolean sourceHadDele } - private IndexRequestBuilder[] snashotAndRestore(String sourceIdx, int numShards, boolean minimal, boolean requireRouting, boolean + private IndexRequestBuilder[] snapshotAndRestore(String sourceIdx, int numShards, boolean minimal, boolean requireRouting, boolean useNested) throws ExecutionException, InterruptedException, IOException { logger.info("--> starting a master node and a data node"); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotShardTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotShardTests.java index 28244b523e129..a7cb04557e54c 
100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotShardTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotShardTests.java @@ -99,7 +99,7 @@ public void testSourceIncomplete() throws IOException { runAsSnapshot(shard.getThreadPool(), () -> repository.snapshotShard(shard, shard.store(), snapshotId, indexId, snapshotRef.getIndexCommit(), indexShardSnapshotStatus))); - assertEquals("Can't snapshot _source only on an index that has incomplete source ie. has _source disabled or filters the source" + assertEquals("Can't snapshot _source only on an index that has incomplete source i.e. has _source disabled or filters the source" , illegalStateException.getMessage()); } closeShards(shard); @@ -163,7 +163,7 @@ private String randomDoc() { return "{ \"value\" : \"" + randomAlphaOfLength(10) + "\"}"; } - public void testRestoreMinmal() throws IOException { + public void testRestoreMinimal() throws IOException { IndexShard shard = newStartedShard(true); int numInitialDocs = randomIntBetween(10, 100); for (int i = 0; i < numInitialDocs; i++) { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotTests.java index 48db059b2178e..c0e45e4342da2 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotTests.java @@ -56,7 +56,7 @@ public void testSourceOnlyRandom() throws IOException { final String softDeletesField = writer.w.getConfig().getSoftDeletesField(); // we either use the soft deletes directly or manually delete them to test the additional delete functionality boolean modifyDeletedDocs = softDeletesField != null && randomBoolean(); - SourceOnlySnapshot snapshoter = new SourceOnlySnapshot(targetDir, + SourceOnlySnapshot snapshotter = 
new SourceOnlySnapshot(targetDir, modifyDeletedDocs ? () -> new DocValuesFieldExistsQuery(softDeletesField) : null) { @Override DirectoryReader wrapReader(DirectoryReader reader) throws IOException { @@ -80,7 +80,7 @@ DirectoryReader wrapReader(DirectoryReader reader) throws IOException { } IndexCommit snapshot = deletionPolicy.snapshot(); try { - snapshoter.syncSnapshot(snapshot); + snapshotter.syncSnapshot(snapshot); } finally { deletionPolicy.release(snapshot); } @@ -91,9 +91,9 @@ DirectoryReader wrapReader(DirectoryReader reader) throws IOException { } IndexCommit snapshot = deletionPolicy.snapshot(); try { - snapshoter.syncSnapshot(snapshot); - try (DirectoryReader snapReader = snapshoter.wrapReader(DirectoryReader.open(targetDir)); - DirectoryReader wrappedReader = snapshoter.wrapReader(DirectoryReader.open(snapshot))) { + snapshotter.syncSnapshot(snapshot); + try (DirectoryReader snapReader = snapshotter.wrapReader(DirectoryReader.open(targetDir)); + DirectoryReader wrappedReader = snapshotter.wrapReader(DirectoryReader.open(snapshot))) { DirectoryReader reader = modifyDeletedDocs ? 
new SoftDeletesDirectoryReaderWrapper(wrappedReader, softDeletesField) : new DropFullDeletedSegmentsReader(wrappedReader); @@ -167,8 +167,8 @@ public boolean useCompoundFile(SegmentInfos infos, SegmentCommitInfo mergedInfo, writer.commit(); Directory targetDir = newDirectory(); IndexCommit snapshot = deletionPolicy.snapshot(); - SourceOnlySnapshot snapshoter = new SourceOnlySnapshot(targetDir); - snapshoter.syncSnapshot(snapshot); + SourceOnlySnapshot snapshotter = new SourceOnlySnapshot(targetDir); + snapshotter.syncSnapshot(snapshot); StandardDirectoryReader reader = (StandardDirectoryReader) DirectoryReader.open(snapshot); try (DirectoryReader snapReader = DirectoryReader.open(targetDir)) { @@ -182,8 +182,8 @@ public boolean useCompoundFile(SegmentInfos infos, SegmentCommitInfo mergedInfo, assertEquals(0, id.totalHits.value); } - snapshoter = new SourceOnlySnapshot(targetDir); - List createdFiles = snapshoter.syncSnapshot(snapshot); + snapshotter = new SourceOnlySnapshot(targetDir); + List createdFiles = snapshotter.syncSnapshot(snapshot); assertEquals(0, createdFiles.size()); deletionPolicy.release(snapshot); // now add another doc @@ -202,8 +202,8 @@ public boolean useCompoundFile(SegmentInfos infos, SegmentCommitInfo mergedInfo, writer.commit(); { snapshot = deletionPolicy.snapshot(); - snapshoter = new SourceOnlySnapshot(targetDir); - createdFiles = snapshoter.syncSnapshot(snapshot); + snapshotter = new SourceOnlySnapshot(targetDir); + createdFiles = snapshotter.syncSnapshot(snapshot); assertEquals(4, createdFiles.size()); for (String file : createdFiles) { String extension = IndexFileNames.getExtension(file); @@ -227,8 +227,8 @@ public boolean useCompoundFile(SegmentInfos infos, SegmentCommitInfo mergedInfo, writer.commit(); { snapshot = deletionPolicy.snapshot(); - snapshoter = new SourceOnlySnapshot(targetDir); - createdFiles = snapshoter.syncSnapshot(snapshot); + snapshotter = new SourceOnlySnapshot(targetDir); + createdFiles = 
snapshotter.syncSnapshot(snapshot); assertEquals(1, createdFiles.size()); for (String file : createdFiles) { String extension = IndexFileNames.getExtension(file); @@ -285,8 +285,8 @@ public boolean keepFullyDeletedSegment(IOSupplier readerIOSupplier) writer.commit(); try (Directory targetDir = newDirectory()) { IndexCommit snapshot = deletionPolicy.snapshot(); - SourceOnlySnapshot snapshoter = new SourceOnlySnapshot(targetDir); - snapshoter.syncSnapshot(snapshot); + SourceOnlySnapshot snapshotter = new SourceOnlySnapshot(targetDir); + snapshotter.syncSnapshot(snapshot); try (DirectoryReader snapReader = DirectoryReader.open(targetDir)) { assertEquals(snapReader.maxDoc(), 1); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/test/http/MockWebServer.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/test/http/MockWebServer.java index 10ea0111b91aa..3b23eb9be7743 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/test/http/MockWebServer.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/test/http/MockWebServer.java @@ -40,7 +40,7 @@ import static org.elasticsearch.test.ESTestCase.terminate; /** - * A MockWebServer to test against. Holds a list of responses, which can be enqueed. + * A MockWebServer to test against. Holds a list of responses, which can be enqueued. * The webserver has to enqueue at least the amount of responses with the number of requests that happen, otherwise errors * will be returned. *

    @@ -208,7 +208,7 @@ public int getPort() { */ public void enqueue(MockResponse response) { if (logger.isTraceEnabled()) { - logger.trace("[{}:{}] Enqueueing response [{}], status [{}] body [{}]", getHostName(), getPort(), responses.size(), + logger.trace("[{}:{}] Enqueuing response [{}], status [{}] body [{}]", getHostName(), getPort(), responses.size(), response.getStatusCode(), getStartOfBody(response)); } responses.add(response); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/AllocationRoutedStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/AllocationRoutedStepTests.java index a887871599ecc..c7248f10eea0a 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/AllocationRoutedStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/AllocationRoutedStepTests.java @@ -314,7 +314,7 @@ public void testExecuteIndexMissing() throws Exception { Result actualResult = step.isConditionMet(index, clusterState); assertFalse(actualResult.isComplete()); - assertNull(actualResult.getInfomationContext()); + assertNull(actualResult.getInformationContext()); } private void assertAllocateStatus(Index index, int shards, int replicas, AllocationRoutedStep step, Settings.Builder existingSettings, @@ -334,6 +334,6 @@ private void assertAllocateStatus(Index index, int shards, int replicas, Allocat .routingTable(RoutingTable.builder().add(indexRoutingTable).build()).build(); Result actualResult = step.isConditionMet(index, clusterState); assertEquals(expectedResult.isComplete(), actualResult.isComplete()); - assertEquals(expectedResult.getInfomationContext(), actualResult.getInfomationContext()); + assertEquals(expectedResult.getInformationContext(), actualResult.getInformationContext()); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/CheckShrinkReadyStepTests.java 
b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/CheckShrinkReadyStepTests.java index 579ee1630e225..e068a917e73f7 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/CheckShrinkReadyStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/CheckShrinkReadyStepTests.java @@ -324,7 +324,7 @@ public void testExecuteIndexMissing() throws Exception { ClusterStateWaitStep.Result actualResult = step.isConditionMet(index, clusterState); assertFalse(actualResult.isComplete()); - assertNull(actualResult.getInfomationContext()); + assertNull(actualResult.getInformationContext()); } private void assertAllocateStatus(Index index, int shards, int replicas, CheckShrinkReadyStep step, Settings.Builder existingSettings, @@ -348,6 +348,6 @@ private void assertAllocateStatus(Index index, int shards, int replicas, CheckSh .routingTable(RoutingTable.builder().add(indexRoutingTable).build()).build(); ClusterStateWaitStep.Result actualResult = step.isConditionMet(index, clusterState); assertEquals(expectedResult.isComplete(), actualResult.isComplete()); - assertEquals(expectedResult.getInfomationContext(), actualResult.getInfomationContext()); + assertEquals(expectedResult.getInformationContext(), actualResult.getInformationContext()); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ShrunkShardsAllocatedStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ShrunkShardsAllocatedStepTests.java index 272b50499d7fd..66e6e6cfc1be1 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ShrunkShardsAllocatedStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ShrunkShardsAllocatedStepTests.java @@ -98,7 +98,7 @@ public void testConditionMet() { Result result = step.isConditionMet(originalIndexMetadata.getIndex(), 
clusterState); assertTrue(result.isComplete()); - assertNull(result.getInfomationContext()); + assertNull(result.getInformationContext()); } public void testConditionNotMetBecauseOfActive() { @@ -139,7 +139,7 @@ public void testConditionNotMetBecauseOfActive() { Result result = step.isConditionMet(originalIndexMetadata.getIndex(), clusterState); assertFalse(result.isComplete()); assertEquals(new ShrunkShardsAllocatedStep.Info(true, shrinkNumberOfShards, false), - result.getInfomationContext()); + result.getInformationContext()); } public void testConditionNotMetBecauseOfShrunkIndexDoesntExistYet() { @@ -166,6 +166,6 @@ public void testConditionNotMetBecauseOfShrunkIndexDoesntExistYet() { Result result = step.isConditionMet(originalIndexMetadata.getIndex(), clusterState); assertFalse(result.isComplete()); - assertEquals(new ShrunkShardsAllocatedStep.Info(false, -1, false), result.getInfomationContext()); + assertEquals(new ShrunkShardsAllocatedStep.Info(false, -1, false), result.getInformationContext()); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ShrunkenIndexCheckStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ShrunkenIndexCheckStepTests.java index 64d2dd09f9659..819c62ad47804 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ShrunkenIndexCheckStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ShrunkenIndexCheckStepTests.java @@ -66,7 +66,7 @@ public void testConditionMet() { ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metaData(metaData).build(); Result result = step.isConditionMet(indexMetadata.getIndex(), clusterState); assertTrue(result.isComplete()); - assertNull(result.getInfomationContext()); + assertNull(result.getInformationContext()); } public void testConditionNotMetBecauseNotSameShrunkenIndex() { @@ -83,7 +83,7 @@ public void 
testConditionNotMetBecauseNotSameShrunkenIndex() { ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metaData(metaData).build(); Result result = step.isConditionMet(shrinkIndexMetadata.getIndex(), clusterState); assertFalse(result.isComplete()); - assertEquals(new ShrunkenIndexCheckStep.Info(sourceIndex), result.getInfomationContext()); + assertEquals(new ShrunkenIndexCheckStep.Info(sourceIndex), result.getInformationContext()); } public void testConditionNotMetBecauseSourceIndexExists() { @@ -105,7 +105,7 @@ public void testConditionNotMetBecauseSourceIndexExists() { ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metaData(metaData).build(); Result result = step.isConditionMet(shrinkIndexMetadata.getIndex(), clusterState); assertFalse(result.isComplete()); - assertEquals(new ShrunkenIndexCheckStep.Info(sourceIndex), result.getInfomationContext()); + assertEquals(new ShrunkenIndexCheckStep.Info(sourceIndex), result.getInformationContext()); } public void testIllegalState() { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/WaitForRolloverReadyStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/WaitForRolloverReadyStepTests.java index 46acda7fdebf5..9de5ed45d63d8 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/WaitForRolloverReadyStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/WaitForRolloverReadyStepTests.java @@ -156,7 +156,7 @@ public Void answer(InvocationOnMock invocation) throws Throwable { step.evaluateCondition(indexMetaData, new AsyncWaitStep.Listener() { @Override - public void onResponse(boolean complete, ToXContentObject infomationContext) { + public void onResponse(boolean complete, ToXContentObject informationContext) { conditionsMet.set(complete); } @@ -188,7 +188,7 @@ public void testPerformActionWithIndexingComplete() { 
step.evaluateCondition(indexMetaData, new AsyncWaitStep.Listener() { @Override - public void onResponse(boolean complete, ToXContentObject infomationContext) { + public void onResponse(boolean complete, ToXContentObject informationContext) { conditionsMet.set(complete); } @@ -216,7 +216,7 @@ public void testPerformActionWithIndexingCompleteStillWriteIndex() { step.evaluateCondition(indexMetaData, new AsyncWaitStep.Listener() { @Override - public void onResponse(boolean complete, ToXContentObject infomationContext) { + public void onResponse(boolean complete, ToXContentObject informationContext) { throw new AssertionError("Should have failed with indexing_complete but index is not write index"); } @@ -273,7 +273,7 @@ public Void answer(InvocationOnMock invocation) throws Throwable { step.evaluateCondition(indexMetaData, new AsyncWaitStep.Listener() { @Override - public void onResponse(boolean complete, ToXContentObject infomationContext) { + public void onResponse(boolean complete, ToXContentObject informationContext) { actionCompleted.set(complete); } @@ -332,7 +332,7 @@ public Void answer(InvocationOnMock invocation) throws Throwable { step.evaluateCondition(indexMetaData, new AsyncWaitStep.Listener() { @Override - public void onResponse(boolean complete, ToXContentObject infomationContext) { + public void onResponse(boolean complete, ToXContentObject informationContext) { throw new AssertionError("Unexpected method call"); } @@ -360,7 +360,7 @@ public void testPerformActionInvalidNullOrEmptyAlias() { SetOnce exceptionThrown = new SetOnce<>(); step.evaluateCondition(indexMetaData, new AsyncWaitStep.Listener() { @Override - public void onResponse(boolean complete, ToXContentObject infomationContext) { + public void onResponse(boolean complete, ToXContentObject informationContext) { throw new AssertionError("Unexpected method call"); } @@ -385,7 +385,7 @@ public void testPerformActionAliasDoesNotPointToIndex() { SetOnce exceptionThrown = new SetOnce<>(); 
step.evaluateCondition(indexMetaData, new AsyncWaitStep.Listener() { @Override - public void onResponse(boolean complete, ToXContentObject infomationContext) { + public void onResponse(boolean complete, ToXContentObject informationContext) { throw new AssertionError("Unexpected method call"); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/UpdateCalendarJobActionResquestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/UpdateCalendarJobActionRequestTests.java similarity index 88% rename from x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/UpdateCalendarJobActionResquestTests.java rename to x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/UpdateCalendarJobActionRequestTests.java index b11c571bf78b8..b1d49347496af 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/UpdateCalendarJobActionResquestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/UpdateCalendarJobActionRequestTests.java @@ -7,7 +7,7 @@ import org.elasticsearch.test.AbstractStreamableTestCase; -public class UpdateCalendarJobActionResquestTests extends AbstractStreamableTestCase { +public class UpdateCalendarJobActionRequestTests extends AbstractStreamableTestCase { @Override protected UpdateCalendarJobAction.Request createTestInstance() { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfigTests.java index 7975bd657f9f2..581d8909ee6f4 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfigTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfigTests.java @@ -514,7 +514,7 @@ public void testCheckHistogramAggregationHasChildMaxTimeAgg() { assertThat(e.getMessage(), containsString("Date histogram must have nested max aggregation for time_field [max_time]")); } - public void testValidateAggregations_GivenMulitpleHistogramAggs() { + public void testValidateAggregations_GivenMultipleHistogramAggs() { DateHistogramAggregationBuilder 
nestedDateHistogram = AggregationBuilders.dateHistogram("nested_time"); AvgAggregationBuilder avg = AggregationBuilders.avg("avg").subAggregation(nestedDateHistogram); TermsAggregationBuilder nestedTerms = AggregationBuilders.terms("nested_terms"); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdateTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdateTests.java index bf61ed541aebb..a9a8ceb6e59fc 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdateTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdateTests.java @@ -78,7 +78,7 @@ public static DatafeedUpdate createRandomized(String datafeedId, @Nullable Dataf if (randomBoolean() && datafeed == null) { // can only test with a single agg as the xcontent order gets randomized by test base class and then // the actual xcontent isn't the same and test fail. 
- // Testing with a single agg is ok as we don't have special list writeable / xconent logic + // Testing with a single agg is ok as we don't have special list writeable / xcontent logic AggregatorFactories.Builder aggs = new AggregatorFactories.Builder(); aggs.addAggregator(AggregationBuilders.avg(randomAlphaOfLength(10)).field(randomAlphaOfLength(10))); builder.setAggregations(aggs); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/DetectorTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/DetectorTests.java index fe546a371816d..ff3bdec13a78d 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/DetectorTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/DetectorTests.java @@ -201,7 +201,7 @@ public void testVerifyFieldNames_givenInvalidChars() { } } - public void testVerifyFunctionForPreSummariedInput() { + public void testVerifyFunctionForPreSummarizedInput() { Collection testCaseArguments = getCharactersAndValidity(); for (Object [] args : testCaseArguments) { String character = (String) args[0]; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappingsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappingsTests.java index e4ce536a3ccf6..d261c14e97aa2 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappingsTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappingsTests.java @@ -51,7 +51,7 @@ public class ElasticsearchMappingsTests extends ESTestCase { ElasticsearchMappings.WHITESPACE ); - public void testResultsMapppingReservedFields() throws Exception { + public void testResultsMappingReservedFields() throws Exception { Set overridden = new HashSet<>(KEYWORDS); // These are 
not reserved because they're data types, not field names @@ -68,7 +68,7 @@ public void testResultsMapppingReservedFields() throws Exception { compareFields(expected, ReservedFieldNames.RESERVED_RESULT_FIELD_NAMES); } - public void testConfigMapppingReservedFields() throws Exception { + public void testConfigMappingReservedFields() throws Exception { Set overridden = new HashSet<>(KEYWORDS); // These are not reserved because they're data types, not field names diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/results/AnomalyRecordTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/results/AnomalyRecordTests.java index 882a46f3cbe20..3a44b2c66b609 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/results/AnomalyRecordTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/results/AnomalyRecordTests.java @@ -137,7 +137,7 @@ public void testToXContentOrdersDuplicateInputFields() throws IOException { Influence influence3 = new Influence("spoiler", Collections.singletonList("no")); record.setInfluencers(Arrays.asList(influence1, influence2, influence3)); - // influencer fields with the same name as a by/over/partitiion field + // influencer fields with the same name as a by/over/partition field // come second in the list BytesReference bytes = XContentHelper.toXContent(record, XContentType.JSON, false); XContentParser parser = createParser(XContentType.JSON.xContent(), bytes); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/DefaultAuthenticationFailureHandlerTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/DefaultAuthenticationFailureHandlerTests.java index ae7798815731b..49bd034b6b627 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/DefaultAuthenticationFailureHandlerTests.java +++ 
b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/DefaultAuthenticationFailureHandlerTests.java @@ -122,13 +122,13 @@ public void testSortsWWWAuthenticateHeaderValues() { final String basicAuthScheme = "Basic realm=\"" + XPackField.SECURITY + "\" charset=\"UTF-8\""; final String bearerAuthScheme = "Bearer realm=\"" + XPackField.SECURITY + "\""; final String negotiateAuthScheme = randomFrom("Negotiate", "Negotiate Ijoijksdk"); - final Map> failureResponeHeaders = new HashMap<>(); + final Map> failureResponseHeaders = new HashMap<>(); final List supportedSchemes = Arrays.asList(basicAuthScheme, bearerAuthScheme, negotiateAuthScheme); Collections.shuffle(supportedSchemes, random()); - failureResponeHeaders.put("WWW-Authenticate", supportedSchemes); - final DefaultAuthenticationFailureHandler failuerHandler = new DefaultAuthenticationFailureHandler(failureResponeHeaders); + failureResponseHeaders.put("WWW-Authenticate", supportedSchemes); + final DefaultAuthenticationFailureHandler failureHandler = new DefaultAuthenticationFailureHandler(failureResponseHeaders); - final ElasticsearchSecurityException ese = failuerHandler.exceptionProcessingRequest(mock(RestRequest.class), null, + final ElasticsearchSecurityException ese = failureHandler.exceptionProcessingRequest(mock(RestRequest.class), null, new ThreadContext(Settings.builder().build())); assertThat(ese, is(notNullValue())); diff --git a/x-pack/plugin/graph/src/main/java/org/elasticsearch/xpack/graph/action/TransportGraphExploreAction.java b/x-pack/plugin/graph/src/main/java/org/elasticsearch/xpack/graph/action/TransportGraphExploreAction.java index d05c075d26b65..d05887ae7c219 100644 --- a/x-pack/plugin/graph/src/main/java/org/elasticsearch/xpack/graph/action/TransportGraphExploreAction.java +++ b/x-pack/plugin/graph/src/main/java/org/elasticsearch/xpack/graph/action/TransportGraphExploreAction.java @@ -276,7 +276,7 @@ synchronized void expand() { // significant e.g. 
in the lastfm example of // plotting a single user's tastes and how that maps // into a network showing only the most interesting - // band connections. So line below commmented out + // band connections. So line below commented out // nextWaveSigTerms.size(includes.length); diff --git a/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/indexlifecycle/ChangePolicyforIndexIT.java b/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/indexlifecycle/ChangePolicyforIndexIT.java index 84596c423b339..ef3861d4a6098 100644 --- a/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/indexlifecycle/ChangePolicyforIndexIT.java +++ b/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/indexlifecycle/ChangePolicyforIndexIT.java @@ -44,7 +44,7 @@ public class ChangePolicyforIndexIT extends ESRestTestCase { * to rollover and a warm phase with an impossible allocation action. The * second policy has a rollover action requiring 1000 document and a warm * phase that moves the index to known nodes that will succeed. An index is - * created with the fiorst policy set and the test ensures the policy is in + * created with the first policy set and the test ensures the policy is in * the rollover step. It then changes the policy for the index to the second * policy. 
It indexes a single document and checks that the index moves past - * the hot phase and through the warm phasee (proving the hot phase + * the hot phase and through the warm phase (proving the hot phase diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/ExecuteStepsUpdateTask.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/ExecuteStepsUpdateTask.java index 70aa9af2c7277..85002ff539706 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/ExecuteStepsUpdateTask.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/ExecuteStepsUpdateTask.java @@ -135,7 +135,7 @@ public ClusterState execute(final ClusterState currentState) throws IOException // not met, we can't advance any way, so don't attempt // to run the current step nextStepKey = null; - ToXContentObject stepInfo = result.getInfomationContext(); + ToXContentObject stepInfo = result.getInformationContext(); if (stepInfo == null) { return state; } else { diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportExplainLifecycleAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportExplainLifecycleAction.java index 0f69b1f21dce7..7b1c6f95fc6e3 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportExplainLifecycleAction.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportExplainLifecycleAction.java @@ -72,7 +72,7 @@ protected ClusterBlockException checkBlock(ExplainLifecycleRequest request, Clus @Override protected void doMasterOperation(ExplainLifecycleRequest request, String[] concreteIndices, ClusterState state, ActionListener listener) { - Map indexReponses = new HashMap<>(); for (String index : concreteIndices) { IndexMetaData idxMetadata = state.metaData().index(index); Settings idxSettings = idxMetadata.getSettings(); @@ -113,9 +113,9 @@ 
protected void doMasterOperation(ExplainLifecycleRequest request, String[] concr } else { indexResponse = IndexLifecycleExplainResponse.newUnmanagedIndexResponse(index); } - indexReponses.put(indexResponse.getIndex(), indexResponse); + indexResponses.put(indexResponse.getIndex(), indexResponse); } - listener.onResponse(new ExplainLifecycleResponse(indexReponses)); + listener.onResponse(new ExplainLifecycleResponse(indexResponses)); } } diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleInitialisationTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleInitialisationTests.java index a041232d8a7e7..888a7bd667bdd 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleInitialisationTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleInitialisationTests.java @@ -354,7 +354,7 @@ public void testMasterFailover() throws Exception { logger.info("new master is operation"); // complete the step - AcknowledgedResponse repsonse = client().admin().indices().prepareUpdateSettings("test") + AcknowledgedResponse response = client().admin().indices().prepareUpdateSettings("test") .setSettings(Collections.singletonMap("index.lifecycle.test.complete", true)).get(); assertBusy(() -> { diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/MlJobIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/MlJobIT.java index 4b0f9e7aac304..db12d5e719ecd 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/MlJobIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/MlJobIT.java @@ -77,10 +77,10 @@ public void testGetJobs_GivenSingleJob() throws Exception { createFarequoteJob(jobId); // Explicit 
_all - String explictAll = EntityUtils.toString( + String explicitAll = EntityUtils.toString( client().performRequest(new Request("GET", MachineLearning.BASE_PATH + "anomaly_detectors/_all")).getEntity()); - assertThat(explictAll, containsString("\"count\":1")); - assertThat(explictAll, containsString("\"job_id\":\"" + jobId + "\"")); + assertThat(explicitAll, containsString("\"count\":1")); + assertThat(explicitAll, containsString("\"job_id\":\"" + jobId + "\"")); // Implicit _all String implicitAll = EntityUtils.toString( diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java index 7cb74c4df5eda..7aab44fe805f6 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java @@ -608,7 +608,7 @@ public List> getExecutorBuilders(Settings settings) { maxNumberOfJobs * 4, maxNumberOfJobs * 4, "xpack.ml.autodetect_thread_pool"); // 4 threads per job: processing logging, result and state of the renormalization process. - // Renormalization does't run for the entire lifetime of a job, so additionally autodetect process + // Renormalization doesn't run for the entire lifetime of a job, so additionally autodetect process // based operation (open, close, flush, post data), datafeed based operations (start and stop) // and deleting expired data use this threadpool too and queue up if all threads are busy. 
FixedExecutorBuilder renormalizer = new FixedExecutorBuilder(settings, UTILITY_THREAD_POOL_NAME, diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteForecastAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteForecastAction.java index 2575e71444447..2e3313da9970d 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteForecastAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteForecastAction.java @@ -92,8 +92,8 @@ protected void doExecute(Task task, DeleteForecastAction.Request request, Action QueryBuilders.termQuery(Result.RESULT_TYPE.getPreferredName(), ForecastRequestStats.RESULT_TYPE_VALUE)); if (MetaData.ALL.equals(request.getForecastId()) == false) { - Set forcastIds = new HashSet<>(Arrays.asList(Strings.tokenizeToStringArray(forecastsExpression, ","))); - innerBool.must(QueryBuilders.termsQuery(Forecast.FORECAST_ID.getPreferredName(), forcastIds)); + Set forecastIds = new HashSet<>(Arrays.asList(Strings.tokenizeToStringArray(forecastsExpression, ","))); + innerBool.must(QueryBuilders.termsQuery(Forecast.FORECAST_ID.getPreferredName(), forecastIds)); } source.query(builder.filter(innerBool)); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetCalendarEventsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetCalendarEventsAction.java index 2ea7acb0c5e95..7cd12769150e4 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetCalendarEventsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetCalendarEventsAction.java @@ -65,8 +65,8 @@ protected void doExecute(Task task, GetCalendarEventsAction.Request request, if (request.getJobId() != null) { jobConfigProvider.getJob(request.getJobId(), ActionListener.wrap( - jobBuiler -> { - Job job = 
jobBuiler.build(); + jobBuilder -> { + Job job = jobBuilder.build(); jobResultsProvider.scheduledEventsForJob(request.getJobId(), job.getGroups(), query, eventsListener); }, diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPreviewDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPreviewDatafeedAction.java index c87fabe0b771d..263efa81a5bb1 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPreviewDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPreviewDatafeedAction.java @@ -110,7 +110,7 @@ static void previewDatafeed(DataExtractor dataExtractor, ActionListener inputStream = dataExtractor.next(); // DataExtractor returns single-line JSON but without newline characters between objects. - // Instead, it has a space between objects due to how JSON XContenetBuilder works. + // Instead, it has a space between objects due to how JSON XContentBuilder works. // In order to return a proper JSON array from preview, we surround with square brackets and // we stick in a comma between objects. 
// Also, the stream is expected to be a single line but in case it is not, we join lines diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java index 5867948bbad63..438b7637196eb 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java @@ -169,7 +169,7 @@ public void onFailure(Exception e) { }; // Verify data extractor factory can be created, then start persistent task - Consumer createDataExtrator = job -> { + Consumer createDataExtractor = job -> { if (RemoteClusterLicenseChecker.containsRemoteIndex(params.getDatafeedIndices())) { final RemoteClusterLicenseChecker remoteClusterLicenseChecker = new RemoteClusterLicenseChecker(client, XPackLicenseState::isMachineLearningAllowedForOperationMode); @@ -200,7 +200,7 @@ public void onFailure(Exception e) { Job job = jobBuilder.build(); validate(job, datafeedConfigHolder.get(), tasks); auditDeprecations(datafeedConfigHolder.get(), job, auditor); - createDataExtrator.accept(job); + createDataExtractor.accept(job); } catch (Exception e) { listener.onFailure(e); } @@ -239,7 +239,7 @@ private void createDataExtractor(Job job, DatafeedConfig datafeed, StartDatafeed @Override protected ClusterBlockException checkBlock(StartDatafeedAction.Request request, ClusterState state) { // We only delegate here to PersistentTasksService, but if there is a metadata writeblock, - // then delagating to PersistentTasksService doesn't make a whole lot of sense, + // then delegating to PersistentTasksService doesn't make a whole lot of sense, // because PersistentTasksService will then fail. 
return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobBuilder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobBuilder.java index d4bbc04cdf51d..b19e80fccaa3c 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobBuilder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobBuilder.java @@ -71,7 +71,7 @@ void build(String datafeedId, JobResultsProvider jobResultsProvider, JobConfigPr AtomicReference datafeedConfigHolder = new AtomicReference<>(); // Step 5. Build datafeed job object - Consumer contextHanlder = context -> { + Consumer contextHandler = context -> { TimeValue frequency = getFrequencyOrDefault(datafeedConfigHolder.get(), jobHolder.get()); TimeValue queryDelay = datafeedConfigHolder.get().getQueryDelay(); DelayedDataDetector delayedDataDetector = @@ -90,7 +90,7 @@ void build(String datafeedId, JobResultsProvider jobResultsProvider, JobConfigPr ActionListener dataExtractorFactoryHandler = ActionListener.wrap( dataExtractorFactory -> { context.dataExtractorFactory = dataExtractorFactory; - contextHanlder.accept(context); + contextHandler.accept(context); }, e -> { auditor.error(jobHolder.get().getId(), e.getMessage()); listener.onFailure(e); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorFactory.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorFactory.java index 986387c2ed808..800ed2f9f9bf9 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorFactory.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorFactory.java @@ -55,7 +55,7 @@ public DataExtractor newExtractor(long start, long 
end) { public static void create(Client client, DatafeedConfig datafeed, Job job, ActionListener listener) { - // Step 2. Contruct the factory and notify listener + // Step 2. Construct the factory and notify listener ActionListener fieldCapabilitiesHandler = ActionListener.wrap( fieldCapabilitiesResponse -> { TimeBasedExtractedFields extractedFields = TimeBasedExtractedFields.build(job, datafeed, fieldCapabilitiesResponse); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/persistence/DatafeedConfigProvider.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/persistence/DatafeedConfigProvider.java index d9ea6cb7c32e4..7c53c34a4550b 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/persistence/DatafeedConfigProvider.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/persistence/DatafeedConfigProvider.java @@ -338,7 +338,7 @@ private void indexUpdatedConfig(DatafeedConfig updatedConfig, long version, Acti } /** - * Expands an expression into the set of matching names. {@code expresssion} + * Expands an expression into the set of matching names. {@code expression} * may be a wildcard, a datafeed ID or a list of those. * If {@code expression} == 'ALL', '*' or the empty string then all * datafeed IDs are returned. 
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/XmlFileStructureFinderFactory.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/XmlFileStructureFinderFactory.java index 97984d1d77560..ae36cae9b8703 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/XmlFileStructureFinderFactory.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/XmlFileStructureFinderFactory.java @@ -87,7 +87,7 @@ public boolean canCreateFromSample(List explanation, String sample) { ++completeDocCount; // Find the position that's one character beyond end of the end element. // The next document (if there is one) must start after this (possibly - // preceeded by whitespace). + // preceded by whitespace). Location location = xmlReader.getLocation(); int endPos = 0; // Line and column numbers start at 1, not 0 diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobConfigProvider.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobConfigProvider.java index 73b1fe155fc3c..49ea662cc3987 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobConfigProvider.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobConfigProvider.java @@ -460,7 +460,7 @@ public void onFailure(Exception e) { /** * For the list of job Ids find all that match existing jobs Ids. - * The repsonse is all the job Ids in {@code ids} that match an existing + * The response is all the job Ids in {@code ids} that match an existing * job Id. * @param ids Job Ids to find * @param listener The matched Ids listener @@ -524,7 +524,7 @@ public void markJobAsDeleting(String jobId, ActionListener listener) { } /** - * Expands an expression into the set of matching names. {@code expresssion} + * Expands an expression into the set of matching names. 
{@code expression} * may be a wildcard, a job group, a job Id or a list of those. * If {@code expression} == 'ALL', '*' or the empty string then all * job Ids are returned. diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsProvider.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsProvider.java index cc75d48b81c0b..1f1b3d2934176 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsProvider.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsProvider.java @@ -1028,11 +1028,11 @@ public void getEstablishedMemoryUsage(String jobId, Date latestBucketTimestamp, // no need to do an extra search in the case of exactly one document being aggregated handler.accept((long) extendedStats.getAvg()); } else { - double coefficientOfVaration = extendedStats.getStdDeviation() / extendedStats.getAvg(); + double coefficientOfVariation = extendedStats.getStdDeviation() / extendedStats.getAvg(); LOGGER.trace("[{}] Coefficient of variation [{}] when calculating established memory use", - jobId, coefficientOfVaration); + jobId, coefficientOfVariation); // is there sufficient stability in the latest model size stats readings? 
- if (coefficientOfVaration <= ESTABLISHED_MEMORY_CV_THRESHOLD) { + if (coefficientOfVariation <= ESTABLISHED_MEMORY_CV_THRESHOLD) { // yes, so return the latest model size as established handleLatestModelSizeStats(jobId, latestModelSizeStats, handler, errorHandler); } else { @@ -1237,7 +1237,7 @@ public void calendars(CalendarQueryBuilder queryBuilder, ActionListener listener) { - ActionListener updateCalandarsListener = ActionListener.wrap( + ActionListener updateCalendarsListener = ActionListener.wrap( r -> { if (r.hasFailures()) { listener.onResponse(false); @@ -1268,7 +1268,7 @@ public void removeJobFromCalendars(String jobId, ActionListener listene }); if (bulkUpdate.numberOfActions() > 0) { - executeAsyncWithOrigin(client, ML_ORIGIN, BulkAction.INSTANCE, bulkUpdate.request(), updateCalandarsListener); + executeAsyncWithOrigin(client, ML_ORIGIN, BulkAction.INSTANCE, bulkUpdate.request(), updateCalendarsListener); } else { listener.onResponse(true); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/AbstractExpiredJobDataRemover.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/AbstractExpiredJobDataRemover.java index b595c564ab9aa..de040bd580c2d 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/AbstractExpiredJobDataRemover.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/AbstractExpiredJobDataRemover.java @@ -79,7 +79,7 @@ private long calcCutoffEpochMs(long retentionDays) { /** * Template method to allow implementation details of various types of data (e.g. results, model snapshots). - * Implementors need to call {@code listener.onResponse} when they are done in order to continue to the next job. + * Implementers need to call {@code listener.onResponse} when they are done in order to continue to the next job. 
*/ protected abstract void removeDataBefore(Job job, long cutoffEpochMs, ActionListener listener); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/NativeProcess.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/NativeProcess.java index c4f2b4a463185..1647b64e5c5df 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/NativeProcess.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/NativeProcess.java @@ -24,7 +24,7 @@ public interface NativeProcess extends Closeable { * Write the record to the process. The record parameter should not be encoded * (i.e. length encoded) the implementation will apply the correct encoding. * - * @param record Plain array of strings, implementors of this class should + * @param record Plain array of strings, implementers of this class should * encode the record appropriately * @throws IOException If the write failed */ diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/NativeStorageProvider.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/NativeStorageProvider.java index 1f9aee7bbeea3..7c9b771f69502 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/NativeStorageProvider.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/NativeStorageProvider.java @@ -85,7 +85,7 @@ public boolean localTmpStorageHasEnoughSpace(Path path, ByteSizeValue requestedS return getUsableSpace(p) >= requestedSize.getBytes() + minLocalStorageAvailable.getBytes(); } } catch (IOException e) { - LOGGER.debug("Failed to optain information about path [{}]: {}", path, e); + LOGGER.debug("Failed to obtain information about path [{}]: {}", path, e); } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/results/RestGetCategoriesAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/results/RestGetCategoriesAction.java 
index 900acec1b5a6d..7d1444f9e45ed 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/results/RestGetCategoriesAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/results/RestGetCategoriesAction.java @@ -58,7 +58,7 @@ public RestGetCategoriesAction(Settings settings, RestController controller) { @Override public String getName() { - return "ml_get_catagories_action"; + return "ml_get_categories_action"; } @Override diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportPreviewDatafeedActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportPreviewDatafeedActionTests.java index b2f1107590712..bf039eea1c35c 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportPreviewDatafeedActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportPreviewDatafeedActionTests.java @@ -91,7 +91,7 @@ public void testBuildPreviewDatafeed_GivenAggregations() { assertThat(previewDatafeed.getChunkingConfig(), equalTo(datafeed.build().getChunkingConfig())); } - public void testPreviewDatafed_GivenEmptyStream() throws IOException { + public void testPreviewDatafeed_GivenEmptyStream() throws IOException { when(dataExtractor.next()).thenReturn(Optional.empty()); TransportPreviewDatafeedAction.previewDatafeed(dataExtractor, actionListener); @@ -101,7 +101,7 @@ public void testPreviewDatafed_GivenEmptyStream() throws IOException { verify(dataExtractor).cancel(); } - public void testPreviewDatafed_GivenNonEmptyStream() throws IOException { + public void testPreviewDatafeed_GivenNonEmptyStream() throws IOException { String streamAsString = "{\"a\":1, \"b\":2} {\"c\":3, \"d\":4}\n{\"e\":5, \"f\":6}"; InputStream stream = new ByteArrayInputStream(streamAsString.getBytes(StandardCharsets.UTF_8)); when(dataExtractor.next()).thenReturn(Optional.of(stream)); @@ -113,7 +113,7 @@ public void 
testPreviewDatafed_GivenNonEmptyStream() throws IOException { verify(dataExtractor).cancel(); } - public void testPreviewDatafed_GivenFailure() throws IOException { + public void testPreviewDatafeed_GivenFailure() throws IOException { doThrow(new RuntimeException("failed")).when(dataExtractor).next(); TransportPreviewDatafeedAction.previewDatafeed(dataExtractor, actionListener); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManagerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManagerTests.java index 54aa3ade8e1b9..4ecb87db2a745 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManagerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManagerTests.java @@ -278,7 +278,7 @@ public void testDatafeedTaskWaitsUntilJobIsOpened() { capturedClusterStateListener.getValue().clusterChanged( new ClusterChangedEvent("_source", jobOpenedCs.build(), anotherJobCs.build())); - // Now it should run as the job state chanded to OPENED + // Now it should run as the job state changed to OPENED verify(threadPool, times(1)).executor(MachineLearning.DATAFEED_THREAD_POOL_NAME); } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationTestUtils.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationTestUtils.java index 47d2eb828c6a4..3ec49fccb69ec 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationTestUtils.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationTestUtils.java @@ -78,7 +78,7 @@ static Terms createTerms(String name, Term... 
terms) { when(bucket.getKey()).thenReturn(term.key); when(bucket.getDocCount()).thenReturn(term.count); List numericAggs = new ArrayList<>(); - if (term.hasBuckekAggs()) { + if (term.hasBucketAggs()) { when(bucket.getAggregations()).thenReturn(createAggs(term.bucketAggs)); } else { for (Map.Entry keyValue : term.values.entrySet()) { @@ -133,7 +133,7 @@ static class Term { this.bucketAggs = bucketAggs; } - private boolean hasBuckekAggs() { + private boolean hasBucketAggs() { return bucketAggs != null; } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractorTests.java index 20a5783dff7d2..cc9f5689b4a42 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractorTests.java @@ -112,11 +112,11 @@ public void testExtractionGivenSpecifiedChunk() throws IOException { InputStream inputStream2 = mock(InputStream.class); InputStream inputStream3 = mock(InputStream.class); - DataExtractor subExtactor1 = new StubSubExtractor(inputStream1, inputStream2); - when(dataExtractorFactory.newExtractor(1000L, 2000L)).thenReturn(subExtactor1); + DataExtractor subExtractor1 = new StubSubExtractor(inputStream1, inputStream2); + when(dataExtractorFactory.newExtractor(1000L, 2000L)).thenReturn(subExtractor1); - DataExtractor subExtactor2 = new StubSubExtractor(inputStream3); - when(dataExtractorFactory.newExtractor(2000L, 2300L)).thenReturn(subExtactor2); + DataExtractor subExtractor2 = new StubSubExtractor(inputStream3); + when(dataExtractorFactory.newExtractor(2000L, 2300L)).thenReturn(subExtractor2); assertThat(extractor.hasNext(), is(true)); assertEquals(inputStream1, extractor.next().get()); @@ -152,11 +152,11 @@ public void 
testExtractionGivenSpecifiedChunkAndAggs() throws IOException { InputStream inputStream2 = mock(InputStream.class); InputStream inputStream3 = mock(InputStream.class); - DataExtractor subExtactor1 = new StubSubExtractor(inputStream1, inputStream2); - when(dataExtractorFactory.newExtractor(1000L, 2000L)).thenReturn(subExtactor1); + DataExtractor subExtractor1 = new StubSubExtractor(inputStream1, inputStream2); + when(dataExtractorFactory.newExtractor(1000L, 2000L)).thenReturn(subExtractor1); - DataExtractor subExtactor2 = new StubSubExtractor(inputStream3); - when(dataExtractorFactory.newExtractor(2000L, 2300L)).thenReturn(subExtactor2); + DataExtractor subExtractor2 = new StubSubExtractor(inputStream3); + when(dataExtractorFactory.newExtractor(2000L, 2300L)).thenReturn(subExtractor2); assertThat(extractor.hasNext(), is(true)); assertEquals(inputStream1, extractor.next().get()); @@ -193,11 +193,11 @@ public void testExtractionGivenAutoChunkAndAggs() throws IOException { InputStream inputStream2 = mock(InputStream.class); // 200 * 1_000 == 200_000 - DataExtractor subExtactor1 = new StubSubExtractor(inputStream1); - when(dataExtractorFactory.newExtractor(100_000L, 300_000L)).thenReturn(subExtactor1); + DataExtractor subExtractor1 = new StubSubExtractor(inputStream1); + when(dataExtractorFactory.newExtractor(100_000L, 300_000L)).thenReturn(subExtractor1); - DataExtractor subExtactor2 = new StubSubExtractor(inputStream2); - when(dataExtractorFactory.newExtractor(300_000L, 450_000L)).thenReturn(subExtactor2); + DataExtractor subExtractor2 = new StubSubExtractor(inputStream2); + when(dataExtractorFactory.newExtractor(300_000L, 450_000L)).thenReturn(subExtractor2); assertThat(extractor.hasNext(), is(true)); assertEquals(inputStream1, extractor.next().get()); @@ -238,11 +238,11 @@ public void testExtractionGivenAutoChunkAndScrollSize1000() throws IOException { InputStream inputStream1 = mock(InputStream.class); InputStream inputStream2 = mock(InputStream.class); - 
DataExtractor subExtactor1 = new StubSubExtractor(inputStream1); - when(dataExtractorFactory.newExtractor(100000L, 300000L)).thenReturn(subExtactor1); + DataExtractor subExtractor1 = new StubSubExtractor(inputStream1); + when(dataExtractorFactory.newExtractor(100000L, 300000L)).thenReturn(subExtractor1); - DataExtractor subExtactor2 = new StubSubExtractor(inputStream2); - when(dataExtractorFactory.newExtractor(300000L, 450000L)).thenReturn(subExtactor2); + DataExtractor subExtractor2 = new StubSubExtractor(inputStream2); + when(dataExtractorFactory.newExtractor(300000L, 450000L)).thenReturn(subExtractor2); assertThat(extractor.hasNext(), is(true)); assertEquals(inputStream1, extractor.next().get()); @@ -269,11 +269,11 @@ public void testExtractionGivenAutoChunkAndScrollSize500() throws IOException { InputStream inputStream1 = mock(InputStream.class); InputStream inputStream2 = mock(InputStream.class); - DataExtractor subExtactor1 = new StubSubExtractor(inputStream1); - when(dataExtractorFactory.newExtractor(100000L, 200000L)).thenReturn(subExtactor1); + DataExtractor subExtractor1 = new StubSubExtractor(inputStream1); + when(dataExtractorFactory.newExtractor(100000L, 200000L)).thenReturn(subExtractor1); - DataExtractor subExtactor2 = new StubSubExtractor(inputStream2); - when(dataExtractorFactory.newExtractor(200000L, 300000L)).thenReturn(subExtactor2); + DataExtractor subExtractor2 = new StubSubExtractor(inputStream2); + when(dataExtractorFactory.newExtractor(200000L, 300000L)).thenReturn(subExtractor2); assertThat(extractor.hasNext(), is(true)); assertEquals(inputStream1, extractor.next().get()); @@ -298,11 +298,11 @@ public void testExtractionGivenAutoChunkIsLessThanMinChunk() throws IOException InputStream inputStream1 = mock(InputStream.class); InputStream inputStream2 = mock(InputStream.class); - DataExtractor subExtactor1 = new StubSubExtractor(inputStream1); - when(dataExtractorFactory.newExtractor(100000L, 160000L)).thenReturn(subExtactor1); + 
DataExtractor subExtractor1 = new StubSubExtractor(inputStream1); + when(dataExtractorFactory.newExtractor(100000L, 160000L)).thenReturn(subExtractor1); - DataExtractor subExtactor2 = new StubSubExtractor(inputStream2); - when(dataExtractorFactory.newExtractor(160000L, 220000L)).thenReturn(subExtactor2); + DataExtractor subExtractor2 = new StubSubExtractor(inputStream2); + when(dataExtractorFactory.newExtractor(160000L, 220000L)).thenReturn(subExtractor2); assertThat(extractor.hasNext(), is(true)); assertEquals(inputStream1, extractor.next().get()); @@ -326,8 +326,8 @@ public void testExtractionGivenAutoChunkAndDataTimeSpreadIsZero() throws IOExcep InputStream inputStream1 = mock(InputStream.class); - DataExtractor subExtactor1 = new StubSubExtractor(inputStream1); - when(dataExtractorFactory.newExtractor(300L, 500L)).thenReturn(subExtactor1); + DataExtractor subExtractor1 = new StubSubExtractor(inputStream1); + when(dataExtractorFactory.newExtractor(300L, 500L)).thenReturn(subExtractor1); assertThat(extractor.hasNext(), is(true)); assertEquals(inputStream1, extractor.next().get()); @@ -351,8 +351,8 @@ public void testExtractionGivenAutoChunkAndTotalTimeRangeSmallerThanChunk() thro InputStream inputStream1 = mock(InputStream.class); - DataExtractor subExtactor1 = new StubSubExtractor(inputStream1); - when(dataExtractorFactory.newExtractor(1L, 101L)).thenReturn(subExtactor1); + DataExtractor subExtractor1 = new StubSubExtractor(inputStream1); + when(dataExtractorFactory.newExtractor(1L, 101L)).thenReturn(subExtractor1); assertThat(extractor.hasNext(), is(true)); assertEquals(inputStream1, extractor.next().get()); @@ -376,12 +376,12 @@ public void testExtractionGivenAutoChunkAndIntermediateEmptySearchShouldReconfig InputStream inputStream1 = mock(InputStream.class); - DataExtractor subExtactor1 = new StubSubExtractor(inputStream1); - when(dataExtractorFactory.newExtractor(100000L, 200000L)).thenReturn(subExtactor1); + DataExtractor subExtractor1 = new 
StubSubExtractor(inputStream1); + when(dataExtractorFactory.newExtractor(100000L, 200000L)).thenReturn(subExtractor1); // This one is empty - DataExtractor subExtactor2 = new StubSubExtractor(); - when(dataExtractorFactory.newExtractor(200000, 300000L)).thenReturn(subExtactor2); + DataExtractor subExtractor2 = new StubSubExtractor(); + when(dataExtractorFactory.newExtractor(200000, 300000L)).thenReturn(subExtractor2); assertThat(extractor.hasNext(), is(true)); assertEquals(inputStream1, extractor.next().get()); @@ -392,8 +392,8 @@ public void testExtractionGivenAutoChunkAndIntermediateEmptySearchShouldReconfig // This is the last one InputStream inputStream2 = mock(InputStream.class); - DataExtractor subExtactor3 = new StubSubExtractor(inputStream2); - when(dataExtractorFactory.newExtractor(200000, 400000)).thenReturn(subExtactor3); + DataExtractor subExtractor3 = new StubSubExtractor(inputStream2); + when(dataExtractorFactory.newExtractor(200000, 400000)).thenReturn(subExtractor3); assertEquals(inputStream2, extractor.next().get()); assertThat(extractor.next().isPresent(), is(false)); @@ -419,8 +419,8 @@ public void testCancelGivenNextWasNeverCalled() { InputStream inputStream1 = mock(InputStream.class); - DataExtractor subExtactor1 = new StubSubExtractor(inputStream1); - when(dataExtractorFactory.newExtractor(1000L, 2000L)).thenReturn(subExtactor1); + DataExtractor subExtractor1 = new StubSubExtractor(inputStream1); + when(dataExtractorFactory.newExtractor(1000L, 2000L)).thenReturn(subExtractor1); assertThat(extractor.hasNext(), is(true)); @@ -439,8 +439,8 @@ public void testCancelGivenCurrentSubExtractorHasMore() throws IOException { InputStream inputStream1 = mock(InputStream.class); InputStream inputStream2 = mock(InputStream.class); - DataExtractor subExtactor1 = new StubSubExtractor(inputStream1, inputStream2); - when(dataExtractorFactory.newExtractor(1000L, 2000L)).thenReturn(subExtactor1); + DataExtractor subExtractor1 = new StubSubExtractor(inputStream1, 
inputStream2); + when(dataExtractorFactory.newExtractor(1000L, 2000L)).thenReturn(subExtractor1); assertThat(extractor.hasNext(), is(true)); assertEquals(inputStream1, extractor.next().get()); @@ -465,8 +465,8 @@ public void testCancelGivenCurrentSubExtractorIsDone() throws IOException { InputStream inputStream1 = mock(InputStream.class); - DataExtractor subExtactor1 = new StubSubExtractor(inputStream1); - when(dataExtractorFactory.newExtractor(1000L, 2000L)).thenReturn(subExtactor1); + DataExtractor subExtractor1 = new StubSubExtractor(inputStream1); + when(dataExtractorFactory.newExtractor(1000L, 2000L)).thenReturn(subExtractor1); assertThat(extractor.hasNext(), is(true)); assertEquals(inputStream1, extractor.next().get()); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/JobResultsProviderIT.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/JobResultsProviderIT.java index 3843181a0bc3c..29181714b8869 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/JobResultsProviderIT.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/JobResultsProviderIT.java @@ -79,7 +79,7 @@ public void createComponents() throws Exception { waitForMlTemplates(); } - public void testGetCalandarByJobId() throws Exception { + public void testGetCalendarByJobId() throws Exception { List calendars = new ArrayList<>(); calendars.add(new Calendar("empty calendar", Collections.emptyList(), null)); calendars.add(new Calendar("foo calendar", Collections.singletonList("foo"), null)); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/categorization/CategorizationAnalyzerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/categorization/CategorizationAnalyzerTests.java index 59413f6a61879..ff8cad7dce55c 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/categorization/CategorizationAnalyzerTests.java +++ 
b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/categorization/CategorizationAnalyzerTests.java @@ -66,14 +66,14 @@ public void testVerifyConfigBuilder_GivenInvalidAnalyzer() { } public void testVerifyConfigBuilder_GivenValidCustomConfig() throws IOException { - Map ignoreStuffInSqaureBrackets = new HashMap<>(); - ignoreStuffInSqaureBrackets.put("type", "pattern_replace"); - ignoreStuffInSqaureBrackets.put("pattern", "\\[[^\\]]*\\]"); + Map ignoreStuffInSquareBrackets = new HashMap<>(); + ignoreStuffInSquareBrackets.put("type", "pattern_replace"); + ignoreStuffInSquareBrackets.put("pattern", "\\[[^\\]]*\\]"); Map ignoreStuffThatBeginsWithADigit = new HashMap<>(); ignoreStuffThatBeginsWithADigit.put("type", "pattern_replace"); ignoreStuffThatBeginsWithADigit.put("pattern", "^[0-9].*"); CategorizationAnalyzerConfig.Builder builder = new CategorizationAnalyzerConfig.Builder() - .addCharFilter(ignoreStuffInSqaureBrackets) + .addCharFilter(ignoreStuffInSquareBrackets) .setTokenizer("classic") .addTokenFilter("lowercase") .addTokenFilter(ignoreStuffThatBeginsWithADigit) @@ -107,11 +107,11 @@ public void testVerifyConfigBuilder_GivenCustomConfigWithMisconfiguredCharFilter } public void testVerifyConfigBuilder_GivenCustomConfigWithInvalidTokenizer() { - Map ignoreStuffInSqaureBrackets = new HashMap<>(); - ignoreStuffInSqaureBrackets.put("type", "pattern_replace"); - ignoreStuffInSqaureBrackets.put("pattern", "\\[[^\\]]*\\]"); + Map ignoreStuffInSquareBrackets = new HashMap<>(); + ignoreStuffInSquareBrackets.put("type", "pattern_replace"); + ignoreStuffInSquareBrackets.put("pattern", "\\[[^\\]]*\\]"); CategorizationAnalyzerConfig.Builder builder = new CategorizationAnalyzerConfig.Builder() - .addCharFilter(ignoreStuffInSqaureBrackets) + .addCharFilter(ignoreStuffInSquareBrackets) .setTokenizer("oops!") .addTokenFilter("lowercase") .addTokenFilter("snowball"); @@ -121,14 +121,14 @@ public void testVerifyConfigBuilder_GivenCustomConfigWithInvalidTokenizer() { 
} public void testVerifyConfigBuilder_GivenNoTokenizer() { - Map ignoreStuffInSqaureBrackets = new HashMap<>(); - ignoreStuffInSqaureBrackets.put("type", "pattern_replace"); - ignoreStuffInSqaureBrackets.put("pattern", "\\[[^\\]]*\\]"); + Map ignoreStuffInSquareBrackets = new HashMap<>(); + ignoreStuffInSquareBrackets.put("type", "pattern_replace"); + ignoreStuffInSquareBrackets.put("pattern", "\\[[^\\]]*\\]"); Map ignoreStuffThatBeginsWithADigit = new HashMap<>(); ignoreStuffThatBeginsWithADigit.put("type", "pattern_replace"); ignoreStuffThatBeginsWithADigit.put("pattern", "^[0-9].*"); CategorizationAnalyzerConfig.Builder builder = new CategorizationAnalyzerConfig.Builder() - .addCharFilter(ignoreStuffInSqaureBrackets) + .addCharFilter(ignoreStuffInSquareBrackets) .addTokenFilter("lowercase") .addTokenFilter(ignoreStuffThatBeginsWithADigit) .addTokenFilter("snowball"); @@ -138,11 +138,11 @@ public void testVerifyConfigBuilder_GivenNoTokenizer() { } public void testVerifyConfigBuilder_GivenCustomConfigWithInvalidTokenFilter() { - Map ignoreStuffInSqaureBrackets = new HashMap<>(); - ignoreStuffInSqaureBrackets.put("type", "pattern_replace"); - ignoreStuffInSqaureBrackets.put("pattern", "\\[[^\\]]*\\]"); + Map ignoreStuffInSquareBrackets = new HashMap<>(); + ignoreStuffInSquareBrackets.put("type", "pattern_replace"); + ignoreStuffInSquareBrackets.put("pattern", "\\[[^\\]]*\\]"); CategorizationAnalyzerConfig.Builder builder = new CategorizationAnalyzerConfig.Builder() - .addCharFilter(ignoreStuffInSqaureBrackets) + .addCharFilter(ignoreStuffInSquareBrackets) .setTokenizer("classic") .addTokenFilter("lowercase") .addTokenFilter("oh dear!"); @@ -285,14 +285,14 @@ public void testStandardAnalyzer() throws IOException { } public void testCustomAnalyzer() throws IOException { - Map ignoreStuffInSqaureBrackets = new HashMap<>(); - ignoreStuffInSqaureBrackets.put("type", "pattern_replace"); - ignoreStuffInSqaureBrackets.put("pattern", "\\[[^\\]]*\\]"); + Map 
ignoreStuffInSquareBrackets = new HashMap<>(); + ignoreStuffInSquareBrackets.put("type", "pattern_replace"); + ignoreStuffInSquareBrackets.put("pattern", "\\[[^\\]]*\\]"); Map ignoreStuffThatBeginsWithADigit = new HashMap<>(); ignoreStuffThatBeginsWithADigit.put("type", "pattern_replace"); ignoreStuffThatBeginsWithADigit.put("pattern", "^[0-9].*"); CategorizationAnalyzerConfig config = new CategorizationAnalyzerConfig.Builder() - .addCharFilter(ignoreStuffInSqaureBrackets) + .addCharFilter(ignoreStuffInSquareBrackets) .setTokenizer("classic") .addTokenFilter("lowercase") .addTokenFilter(ignoreStuffThatBeginsWithADigit) diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsProviderTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsProviderTests.java index c2bda603724d6..0e4fb4aa2aa5e 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsProviderTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsProviderTests.java @@ -187,7 +187,7 @@ public void onFailure(Exception e) { } @SuppressWarnings("unchecked") - public void testCreateJobRelatedIndicies_createsAliasBecauseIndexNameIsSet() { + public void testCreateJobRelatedIndices_createsAliasBecauseIndexNameIsSet() { String indexName = AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + "custom-bar"; String readAliasName = AnomalyDetectorsIndex.jobResultsAliasedName("foo"); String writeAliasName = AnomalyDetectorsIndex.resultsWriteAlias("foo"); @@ -394,7 +394,7 @@ public void testRecords() throws IOException { recordMap2.put("typical", 1122.4); recordMap2.put("actual", 933.3); recordMap2.put("timestamp", now.getTime()); - recordMap2.put("function", "irrascible"); + recordMap2.put("function", "irascible"); recordMap2.put("bucket_span", 22); source.add(recordMap1); source.add(recordMap2); @@ -421,7 +421,7 @@ public void testRecords() throws 
IOException { assertEquals("irritable", records.get(0).getFunction()); assertEquals(1122.4, records.get(1).getTypical().get(0), 0.000001); assertEquals(933.3, records.get(1).getActual().get(0), 0.000001); - assertEquals("irrascible", records.get(1).getFunction()); + assertEquals("irascible", records.get(1).getFunction()); } public void testRecords_UsingBuilder() throws IOException { @@ -441,7 +441,7 @@ public void testRecords_UsingBuilder() throws IOException { recordMap2.put("typical", 1122.4); recordMap2.put("actual", 933.3); recordMap2.put("timestamp", now.getTime()); - recordMap2.put("function", "irrascible"); + recordMap2.put("function", "irascible"); recordMap2.put("bucket_span", 22); source.add(recordMap1); source.add(recordMap2); @@ -474,7 +474,7 @@ public void testRecords_UsingBuilder() throws IOException { assertEquals("irritable", records.get(0).getFunction()); assertEquals(1122.4, records.get(1).getTypical().get(0), 0.000001); assertEquals(933.3, records.get(1).getActual().get(0), 0.000001); - assertEquals("irrascible", records.get(1).getFunction()); + assertEquals("irascible", records.get(1).getFunction()); } public void testBucketRecords() throws IOException { @@ -496,7 +496,7 @@ public void testBucketRecords() throws IOException { recordMap2.put("typical", 1122.4); recordMap2.put("actual", 933.3); recordMap2.put("timestamp", now.getTime()); - recordMap2.put("function", "irrascible"); + recordMap2.put("function", "irascible"); recordMap2.put("bucket_span", 22); source.add(recordMap1); source.add(recordMap2); @@ -521,7 +521,7 @@ public void testBucketRecords() throws IOException { assertEquals("irritable", records.get(0).getFunction()); assertEquals(1122.4, records.get(1).getTypical().get(0), 0.000001); assertEquals(933.3, records.get(1).getActual().get(0), 0.000001); - assertEquals("irrascible", records.get(1).getFunction()); + assertEquals("irascible", records.get(1).getFunction()); } public void testexpandBucket() throws IOException { diff --git 
a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/params/TimeRangeTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/params/TimeRangeTests.java index 00593d12898de..8471cf1f5b20f 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/params/TimeRangeTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/params/TimeRangeTests.java @@ -22,13 +22,13 @@ public void testGetEnd() { assertEquals("1462096800", TimeRange.builder().endTime("2016-05-01T10:00:00Z").build().getEnd()); } - public void test_UnparseableStartThrows() { + public void test_UnparsableStartThrows() { ElasticsearchParseException e = ESTestCase.expectThrows(ElasticsearchParseException.class, () -> TimeRange.builder().startTime("bad").build()); assertEquals(Messages.getMessage(Messages.REST_INVALID_DATETIME_PARAMS, TimeRange.START_PARAM, "bad"), e.getMessage()); } - public void test_UnparseableEndThrows() { + public void test_UnparsableEndThrows() { ElasticsearchParseException e = ESTestCase.expectThrows(ElasticsearchParseException.class, () -> TimeRange.builder().endTime("bad").build()); assertEquals(Messages.getMessage(Messages.REST_INVALID_DATETIME_PARAMS, TimeRange.END_PARAM, "bad"), e.getMessage()); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/AbstractExpiredJobDataRemoverTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/AbstractExpiredJobDataRemoverTests.java index 7cb5b6ea8ca01..dc90321ce4040 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/AbstractExpiredJobDataRemoverTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/AbstractExpiredJobDataRemoverTests.java @@ -103,7 +103,7 @@ public void testRemoveGivenNoJobs() throws IOException { } - public void testRemoveGivenMulipleBatches() throws 
IOException { + public void testRemoveGivenMultipleBatches() throws IOException { // This is testing AbstractExpiredJobDataRemover.WrappedBatchedJobsIterator int totalHits = 7; List responses = new ArrayList<>(); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/logging/CppLogMessageHandlerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/logging/CppLogMessageHandlerTests.java index 8c69875a54dbc..f6d8b97dfe208 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/logging/CppLogMessageHandlerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/logging/CppLogMessageHandlerTests.java @@ -148,7 +148,7 @@ public void testThrottlingSummaryLevelChanges() throws IllegalAccessException, T executeLoggingTest(is, mockAppender, Level.INFO, "test_throttling"); } - public void testThrottlingLastMessageRepeast() throws IllegalAccessException, TimeoutException, IOException { + public void testThrottlingLastMessageRepeats() throws IllegalAccessException, TimeoutException, IOException { InputStream is = new ByteArrayInputStream(String.join("", TEST_MESSAGE_NOISE, TEST_MESSAGE_NOISE, TEST_MESSAGE_NOISE, TEST_MESSAGE_NOISE, TEST_MESSAGE_NOISE, TEST_MESSAGE_NOISE_DIFFERENT_MESSAGE).getBytes(StandardCharsets.UTF_8)); diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/BytesReferenceMonitoringDocTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/BytesReferenceMonitoringDocTests.java index 68e5c5b657b5c..f064b443d3569 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/BytesReferenceMonitoringDocTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/BytesReferenceMonitoringDocTests.java @@ -133,11 +133,11 @@ public void testEqualsAndHashcode() { doc.getNode(), doc.getSystem(), doc.getType(), 
doc.getId()); }); mutations.add(doc -> { - long intervaMillis; + long intervalMillis; do { - intervaMillis = randomNonNegativeLong(); - } while (intervaMillis == doc.getIntervalMillis()); - return createMonitoringDoc(doc.getCluster(), doc.getTimestamp(), intervaMillis, + intervalMillis = randomNonNegativeLong(); + } while (intervalMillis == doc.getIntervalMillis()); + return createMonitoringDoc(doc.getCluster(), doc.getTimestamp(), intervalMillis, doc.getNode(), doc.getSystem(), doc.getType(), doc.getId()); }); mutations.add(doc -> { diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupRequestTranslator.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupRequestTranslator.java index 8b028b712e717..00129cef9b6d5 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupRequestTranslator.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupRequestTranslator.java @@ -484,7 +484,7 @@ private static List translateVSLeaf(ValuesSourceAggregationB return rolledMetrics; } - // Otherwise, we can cheat and serialize/deserialze into a temp stream as an easy way to clone + // Otherwise, we can cheat and serialize/deserialize into a temp stream as an easy way to clone // leaf metrics, since they don't have any sub-aggs try (BytesStreamOutput output = new BytesStreamOutput()) { try { diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupResponseTranslator.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupResponseTranslator.java index e900d76c84913..119053119aad2 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupResponseTranslator.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupResponseTranslator.java @@ -294,7 +294,7 @@ private static SearchResponse mergeFinalResponse(SearchResponse liveResponse, Li InternalAggregations aggs) { int 
totalShards = rolledResponses.stream().mapToInt(SearchResponse::getTotalShards).sum(); - int sucessfulShards = rolledResponses.stream().mapToInt(SearchResponse::getSuccessfulShards).sum(); + int successfulShards = rolledResponses.stream().mapToInt(SearchResponse::getSuccessfulShards).sum(); int skippedShards = rolledResponses.stream().mapToInt(SearchResponse::getSkippedShards).sum(); long took = rolledResponses.stream().mapToLong(r -> r.getTook().getMillis()).sum() ; @@ -306,7 +306,7 @@ private static SearchResponse mergeFinalResponse(SearchResponse liveResponse, Li if (liveResponse != null) { totalShards += liveResponse.getTotalShards(); - sucessfulShards += liveResponse.getSuccessfulShards(); + successfulShards += liveResponse.getSuccessfulShards(); skippedShards += liveResponse.getSkippedShards(); took = Math.max(took, liveResponse.getTook().getMillis()); isTimedOut = isTimedOut && liveResponse.isTimedOut(); @@ -318,7 +318,7 @@ private static SearchResponse mergeFinalResponse(SearchResponse liveResponse, Li isTimedOut, isTerminatedEarly, numReducePhases); // Shard failures are ignored atm, so returning an empty array is fine - return new SearchResponse(combinedInternal, null, totalShards, sucessfulShards, skippedShards, + return new SearchResponse(combinedInternal, null, totalShards, successfulShards, skippedShards, took, ShardSearchFailure.EMPTY_ARRAY, rolledResponses.get(0).getClusters()); } diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/config/ConfigTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/config/ConfigTests.java index 86891eda669fa..bd8a0b19f8250 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/config/ConfigTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/config/ConfigTests.java @@ -83,7 +83,7 @@ public void testDefaultTimeZone() { assertThat(config.getTimeZone(), equalTo(DateTimeZone.UTC.getID())); } - public void 
testUnkownTimeZone() { + public void testUnknownTimeZone() { Exception e = expectThrows(IllegalArgumentException.class, () -> new DateHistogramGroupConfig("foo", DateHistogramInterval.HOUR, null, "FOO")); assertThat(e.getMessage(), equalTo("The datetime zone id 'FOO' is not recognised")); diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerStateTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerStateTests.java index 37b3fd84ef145..65e333b702b62 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerStateTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerStateTests.java @@ -554,7 +554,7 @@ protected void onAbort() { } // Tests how we handle unknown keys that come back from composite agg, e.g. if we add support for new types but don't - // deal with it everyhwere + // deal with it everywhere public void testUnknownKey() throws Exception { AtomicBoolean isFinished = new AtomicBoolean(false); RollupJob job = new RollupJob(ConfigTestHelpers.randomRollupJobConfig(random()), Collections.emptyMap()); diff --git a/x-pack/plugin/security/cli/build.gradle b/x-pack/plugin/security/cli/build.gradle index 8515b538bd562..221a163803d2b 100644 --- a/x-pack/plugin/security/cli/build.gradle +++ b/x-pack/plugin/security/cli/build.gradle @@ -24,7 +24,7 @@ dependencyLicenses { if (project.inFipsJvm) { unitTest.enabled = false - // Forbiden APIs non-portable checks fail because bouncy castle classes being used from the FIPS JDK since those are + // Forbidden APIs non-portable checks fail because bouncy castle classes being used from the FIPS JDK since those are // not part of the Java specification - all of this is as designed, so we have to relax this check for FIPS. 
tasks.withType(CheckForbiddenApis) { bundledSignatures -= "jdk-non-portable" diff --git a/x-pack/plugin/security/cli/src/test/java/org/elasticsearch/xpack/security/cli/CertificateToolTests.java b/x-pack/plugin/security/cli/src/test/java/org/elasticsearch/xpack/security/cli/CertificateToolTests.java index 9e970ea559ad7..a750ea07c408c 100644 --- a/x-pack/plugin/security/cli/src/test/java/org/elasticsearch/xpack/security/cli/CertificateToolTests.java +++ b/x-pack/plugin/security/cli/src/test/java/org/elasticsearch/xpack/security/cli/CertificateToolTests.java @@ -112,7 +112,7 @@ private Path initTempDir() throws Exception { } @BeforeClass - public static void chechFipsJvm() { + public static void checkFipsJvm() { assumeFalse("Can't run in a FIPS JVM, depends on Non FIPS BouncyCastle", inFipsJvm()); } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java index b7bf96119a2eb..ac50e02501e5d 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java @@ -1350,7 +1350,7 @@ synchronized TokenMetaData pruneKeys(int numKeysToKeep) { } /** - * Returns the current in-use metdata of this {@link TokenService} + * Returns the current in-use metadata of this {@link TokenService} */ public synchronized TokenMetaData getTokenMetaData() { return newTokenMetaData(keyCache.currentTokenKeyHash, keyCache.cache.values()); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/CachingRealm.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/CachingRealm.java index 6089c8f9a70fb..d5cff42c0a5b7 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/CachingRealm.java +++ 
b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/CachingRealm.java @@ -19,7 +19,7 @@ public interface CachingRealm { String name(); /** - * Expires a single user from the cache identified by the String agument + * Expires a single user from the cache identified by the String argument * @param username the identifier of the user to be cleared */ void expire(String username); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStore.java index 6168192d4077f..12aefef96fbd8 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStore.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStore.java @@ -240,7 +240,7 @@ public static void buildRoleFromDescriptors(Collection roleDescr Set clusterPrivileges = new HashSet<>(); final List conditionalClusterPrivileges = new ArrayList<>(); Set runAs = new HashSet<>(); - Map, MergableIndicesPrivilege> indicesPrivilegesMap = new HashMap<>(); + Map, MergeableIndicesPrivilege> indicesPrivilegesMap = new HashMap<>(); // Keyed by application + resource Map>, Set> applicationPrivilegesMap = new HashMap<>(); @@ -267,10 +267,10 @@ public static void buildRoleFromDescriptors(Collection roleDescr if (isExplicitDenial == false) { indicesPrivilegesMap.compute(key, (k, value) -> { if (value == null) { - return new MergableIndicesPrivilege(indicesPrivilege.getIndices(), indicesPrivilege.getPrivileges(), + return new MergeableIndicesPrivilege(indicesPrivilege.getIndices(), indicesPrivilege.getPrivileges(), indicesPrivilege.getGrantedFields(), indicesPrivilege.getDeniedFields(), indicesPrivilege.getQuery()); } else { - value.merge(new MergableIndicesPrivilege(indicesPrivilege.getIndices(), indicesPrivilege.getPrivileges(), +
value.merge(new MergeableIndicesPrivilege(indicesPrivilege.getIndices(), indicesPrivilege.getPrivileges(), indicesPrivilege.getGrantedFields(), indicesPrivilege.getDeniedFields(), indicesPrivilege.getQuery())); return value; } @@ -295,7 +295,7 @@ public static void buildRoleFromDescriptors(Collection roleDescr .cluster(clusterPrivileges, conditionalClusterPrivileges) .runAs(runAsPrivilege); indicesPrivilegesMap.entrySet().forEach((entry) -> { - MergableIndicesPrivilege privilege = entry.getValue(); + MergeableIndicesPrivilege privilege = entry.getValue(); builder.add(fieldPermissionsCache.getFieldPermissions(privilege.fieldPermissionsDefinition), privilege.query, IndexPrivilege.get(privilege.privileges), privilege.indices.toArray(Strings.EMPTY_ARRAY)); }); @@ -382,13 +382,13 @@ boolean isValueInNegativeLookupCache(String key) { /** * A mutable class that can be used to represent the combination of one or more {@link IndicesPrivileges} */ - private static class MergableIndicesPrivilege { + private static class MergeableIndicesPrivilege { private Set indices; private Set privileges; private FieldPermissionsDefinition fieldPermissionsDefinition; private Set query = null; - MergableIndicesPrivilege(String[] indices, String[] privileges, @Nullable String[] grantedFields, @Nullable String[] deniedFields, + MergeableIndicesPrivilege(String[] indices, String[] privileges, @Nullable String[] grantedFields, @Nullable String[] deniedFields, @Nullable BytesReference query) { this.indices = newHashSet(Objects.requireNonNull(indices)); this.privileges = newHashSet(Objects.requireNonNull(privileges)); @@ -398,7 +398,7 @@ private static class MergableIndicesPrivilege { } } - void merge(MergableIndicesPrivilege other) { + void merge(MergeableIndicesPrivilege other) { assert indices.equals(other.indices) : "index names must be equivalent in order to merge"; Set groups = new HashSet<>(); groups.addAll(this.fieldPermissionsDefinition.getFieldGrantExcludeGroups()); diff --git
a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestHasPrivilegesAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestHasPrivilegesAction.java index d26d8db206936..bd50b90622dce 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestHasPrivilegesAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestHasPrivilegesAction.java @@ -65,7 +65,7 @@ public RestHasPrivilegesAction(Settings settings, RestController controller, Sec @Override public String getName() { - return "security_has_priviledges_action"; + return "security_has_privileges_action"; } @Override diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/filter/PatternRule.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/filter/PatternRule.java index 9d255497ce51b..aa11b46655585 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/filter/PatternRule.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/filter/PatternRule.java @@ -114,7 +114,7 @@ private boolean isLocalhost(InetAddress address) { try { return address.isAnyLocalAddress() || address.isLoopbackAddress() || NetworkInterface.getByInetAddress(address) != null; } catch (SocketException e) { - // not defined - ie. it's not a local address + // not defined - i.e. 
it's not a local address return false; } } diff --git a/x-pack/plugin/security/src/main/resources/meta-plugin-descriptor.properties b/x-pack/plugin/security/src/main/resources/meta-plugin-descriptor.properties index 2e878c207acfc..e80494fc1cb2c 100644 --- a/x-pack/plugin/security/src/main/resources/meta-plugin-descriptor.properties +++ b/x-pack/plugin/security/src/main/resources/meta-plugin-descriptor.properties @@ -5,7 +5,7 @@ # # meta-foo.zip <-- zip file for the meta plugin, with this structure: #|____elasticsearch/ -#| |____ <-- The plugin files for bundled_plugin_1 (the content of the elastisearch directory) +#| |____ <-- The plugin files for bundled_plugin_1 (the content of the elasticsearch directory) #| |____ <-- The plugin files for bundled_plugin_2 #| |____ meta-plugin-descriptor.properties <-- example contents below: # diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/AbstractPrivilegeTestCase.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/AbstractPrivilegeTestCase.java index 9317e9f8dcb5a..9eff07b2de846 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/AbstractPrivilegeTestCase.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/AbstractPrivilegeTestCase.java @@ -72,7 +72,7 @@ protected void assertAccessIsDenied(String user, String method, String uri) thro } /** - * Like {@code assertAcessIsDenied}, but for _bulk requests since the entire + * Like {@code assertAccessIsDenied}, but for _bulk requests since the entire * request will not be failed, just the individual ones */ protected void assertBodyHasAccessIsDenied(String user, Request request) throws IOException { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/DateMathExpressionIntegTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/DateMathExpressionIntegTests.java index dbdb16b798374..842b6eca3c77e 100644 --- 
a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/DateMathExpressionIntegTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/DateMathExpressionIntegTests.java @@ -60,7 +60,7 @@ protected String configRoles() { public void testDateMathExpressionsCanBeAuthorized() throws Exception { final String expression = ""; final String expectedIndexName = new IndexNameExpressionResolver().resolveDateMathExpression(expression); - final boolean refeshOnOperation = randomBoolean(); + final boolean refreshOnOperation = randomBoolean(); Client client = client().filterWithHeader(Collections.singletonMap("Authorization", basicAuthHeaderValue("user1", USERS_PASSWD))); if (randomBoolean()) { @@ -68,12 +68,12 @@ public void testDateMathExpressionsCanBeAuthorized() throws Exception { assertThat(response.isAcknowledged(), is(true)); } IndexResponse response = client.prepareIndex(expression, "type").setSource("foo", "bar") - .setRefreshPolicy(refeshOnOperation ? IMMEDIATE : NONE).get(); + .setRefreshPolicy(refreshOnOperation ? IMMEDIATE : NONE).get(); assertEquals(DocWriteResponse.Result.CREATED, response.getResult()); assertThat(response.getIndex(), containsString(expectedIndexName)); - if (refeshOnOperation == false) { + if (refreshOnOperation == false) { client.admin().indices().prepareRefresh(expression).get(); } SearchResponse searchResponse = client.prepareSearch(expression) @@ -88,11 +88,11 @@ public void testDateMathExpressionsCanBeAuthorized() throws Exception { UpdateResponse updateResponse = client.prepareUpdate(expression, "type", response.getId()) .setDoc(Requests.INDEX_CONTENT_TYPE, "new", "field") - .setRefreshPolicy(refeshOnOperation ? IMMEDIATE : NONE) + .setRefreshPolicy(refreshOnOperation ? 
IMMEDIATE : NONE) .get(); assertEquals(DocWriteResponse.Result.UPDATED, updateResponse.getResult()); - if (refeshOnOperation == false) { + if (refreshOnOperation == false) { client.admin().indices().prepareRefresh(expression).get(); } GetResponse getResponse = client.prepareGet(expression, "type", response.getId()).setFetchSource(true).get(); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/MultipleIndicesPermissionsTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/MultipleIndicesPermissionsTests.java index 675300e25760e..51c73ff39e3e1 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/MultipleIndicesPermissionsTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/MultipleIndicesPermissionsTests.java @@ -105,7 +105,7 @@ public void testSingleRole() throws Exception { try { client.prepareSearch("test", "test2").setQuery(matchAllQuery()).get(); - fail("expected an authorization exception when one of mulitple indices is forbidden"); + fail("expected an authorization exception when one of multiple indices is forbidden"); } catch (ElasticsearchSecurityException e) { // expected assertThat(e.status(), is(RestStatus.FORBIDDEN)); @@ -168,7 +168,7 @@ public void testMultipleRoles() throws Exception { .filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user_a", USERS_PASSWD))) .prepareSearch(indices) .get(); - fail("expected an authorization excpetion when trying to search on multiple indices where there are no search permissions on " + + fail("expected an authorization exception when trying to search on multiple indices where there are no search permissions on " + "one/some of them"); } catch (ElasticsearchSecurityException e) { // expected diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/TokenSSLBootsrapCheckTests.java 
b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/TokenSSLBootsrapCheckTests.java index 6d252e0035c9e..56717d1f67dce 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/TokenSSLBootsrapCheckTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/TokenSSLBootsrapCheckTests.java @@ -9,7 +9,7 @@ import org.elasticsearch.test.AbstractBootstrapCheckTestCase; import org.elasticsearch.xpack.core.XPackSettings; -public class TokenSSLBootsrapCheckTests extends AbstractBootstrapCheckTestCase { +public class TokenSSLBootstrapCheckTests extends AbstractBootstrapCheckTestCase { public void testTokenSSLBootstrapCheck() { Settings settings = Settings.EMPTY; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java index e7354b9b32564..9fa288bcb1d7c 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java @@ -440,7 +440,7 @@ public void testAuthenticateRestSuccess() throws Exception { assertTrue(completed.get()); } - public void testAutheticateTransportContextAndHeader() throws Exception { + public void testAuthenticateTransportContextAndHeader() throws Exception { User user1 = new User("username", "r1", "r2"); when(firstRealm.token(threadContext)).thenReturn(token); when(firstRealm.supports(token)).thenReturn(true); @@ -464,7 +464,7 @@ public void testAutheticateTransportContextAndHeader() throws Exception { // checking authentication from the context InternalMessage message1 = new InternalMessage(); - ThreadPool threadPool1 = new TestThreadPool("testAutheticateTransportContextAndHeader1"); + ThreadPool threadPool1 = new 
TestThreadPool("testAuthenticateTransportContextAndHeader1"); try { ThreadContext threadContext1 = threadPool1.getThreadContext(); service = new AuthenticationService(Settings.EMPTY, realms, auditTrail, @@ -486,7 +486,7 @@ public void testAutheticateTransportContextAndHeader() throws Exception { } // checking authentication from the user header - ThreadPool threadPool2 = new TestThreadPool("testAutheticateTransportContextAndHeader2"); + ThreadPool threadPool2 = new TestThreadPool("testAuthenticateTransportContextAndHeader2"); try { ThreadContext threadContext2 = threadPool2.getThreadContext(); final String header; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmCacheTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmCacheTests.java index c3d6c5ae07e0e..d5d475f187346 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmCacheTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmCacheTests.java @@ -106,7 +106,7 @@ public void testCacheInvalidationScenarios() throws LoginException, GSSException verifyNoMoreInteractions(mockKerberosTicketValidator, mockNativeRoleMappingStore); } - public void testAuthenticateWithValidTicketSucessAuthnWithUserDetailsWhenCacheDisabled() + public void testAuthenticateWithValidTicketSuccessAuthnWithUserDetailsWhenCacheDisabled() throws LoginException, GSSException, IOException { // if cache.ttl <= 0 then the cache is disabled settings = buildKerberosRealmSettings(REALM_NAME, diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmTests.java index 8d6869404bbaf..670181719d908 100644 --- 
a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmTests.java @@ -69,7 +69,7 @@ public void testSupports() { assertThat(kerberosRealm.supports(usernamePasswordToken), is(false)); } - public void testAuthenticateWithValidTicketSucessAuthnWithUserDetails() throws LoginException, GSSException { + public void testAuthenticateWithValidTicketSuccessAuthnWithUserDetails() throws LoginException, GSSException { final String username = randomPrincipalName(); final KerberosRealm kerberosRealm = createKerberosRealm(username); final String expectedUsername = maybeRemoveRealmName(username); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectoryRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectoryRealmTests.java index 9fa731138b355..63e4023eeee7c 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectoryRealmTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectoryRealmTests.java @@ -209,7 +209,7 @@ protected String[] ldapUrls() throws LDAPException { } public void testAuthenticateCachesSuccessfulAuthentications() throws Exception { - final RealmConfig.RealmIdentifier realmIdentifier = realmId("testAuthenticateCachesSuccesfulAuthentications"); + final RealmConfig.RealmIdentifier realmIdentifier = realmId("testAuthenticateCachesSuccessfulAuthentications"); Settings settings = settings(realmIdentifier); RealmConfig config = setupRealm(realmIdentifier, settings); ActiveDirectorySessionFactory sessionFactory = spy(new ActiveDirectorySessionFactory(config, sslService, threadPool)); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticatorTests.java 
b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticatorTests.java index 8d10f3ffb6946..5dfe737bf218e 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticatorTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticatorTests.java @@ -885,7 +885,7 @@ public void testAssertionWithoutSubjectConfirmationDataIsRejected() throws Excep assertThat(SamlUtils.isSamlException(exception), is(true)); } - public void testAssetionWithoutBearerSubjectConfirmationMethodIsRejected() throws Exception { + public void testAssertionWithoutBearerSubjectConfirmationMethodIsRejected() throws Exception { Instant now = clock.instant(); Instant validUntil = now.plusSeconds(30); final String sessionindex = randomId(); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolverTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolverTests.java index 83edb189e2935..9d7912ddfb5b0 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolverTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolverTests.java @@ -315,7 +315,7 @@ public void testDashNotExistingIndex() { assertThat(request.indices(), arrayContainingInAnyOrder(expectedIndices)); } - public void testResolveEmptyIndicesExpandWilcardsOpenAndClosed() { + public void testResolveEmptyIndicesExpandWildcardsOpenAndClosed() { SearchRequest request = new SearchRequest(); request.indicesOptions(IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), true, true)); List indices = resolveIndices(request, buildAuthorizedIndices(user, SearchAction.NAME)).getLocal(); @@ -326,7 +326,7 @@ public void testResolveEmptyIndicesExpandWilcardsOpenAndClosed() { 
assertThat(request.indices(), arrayContainingInAnyOrder(replacedIndices)); } - public void testResolveEmptyIndicesExpandWilcardsOpen() { + public void testResolveEmptyIndicesExpandWildcardsOpen() { SearchRequest request = new SearchRequest(); request.indicesOptions(IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), true, false)); List indices = resolveIndices(request, buildAuthorizedIndices(user, SearchAction.NAME)).getLocal(); @@ -335,7 +335,7 @@ public void testResolveEmptyIndicesExpandWilcardsOpen() { assertThat(request.indices(), arrayContainingInAnyOrder(replacedIndices)); } - public void testResolveAllExpandWilcardsOpenAndClosed() { + public void testResolveAllExpandWildcardsOpenAndClosed() { SearchRequest request = new SearchRequest("_all"); request.indicesOptions(IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), true, true)); List indices = resolveIndices(request, buildAuthorizedIndices(user, SearchAction.NAME)).getLocal(); @@ -346,7 +346,7 @@ public void testResolveAllExpandWilcardsOpenAndClosed() { assertThat(request.indices(), arrayContainingInAnyOrder(replacedIndices)); } - public void testResolveAllExpandWilcardsOpen() { + public void testResolveAllExpandWildcardsOpen() { SearchRequest request = new SearchRequest("_all"); request.indicesOptions(IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), true, false)); List indices = resolveIndices(request, buildAuthorizedIndices(user, SearchAction.NAME)).getLocal(); @@ -401,7 +401,7 @@ public void testResolveWildcardsLenientExpandOpen() { assertThat(request.indices(), arrayContainingInAnyOrder(replacedIndices)); } - public void testResolveWildcardsMinusExpandWilcardsOpen() { + public void testResolveWildcardsMinusExpandWildcardsOpen() { SearchRequest request = new SearchRequest("*", "-foofoo*"); request.indicesOptions(IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), true, false)); List indices = resolveIndices(request, buildAuthorizedIndices(user, 
SearchAction.NAME)).getLocal(); @@ -412,7 +412,7 @@ public void testResolveWildcardsMinusExpandWilcardsOpen() { assertThat(request.indices(), arrayContainingInAnyOrder(replacedIndices)); } - public void testResolveWildcardsMinusExpandWilcardsOpenAndClosed() { + public void testResolveWildcardsMinusExpandWildcardsOpenAndClosed() { SearchRequest request = new SearchRequest("*", "-foofoo*"); request.indicesOptions(IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), true, true)); List indices = resolveIndices(request, buildAuthorizedIndices(user, SearchAction.NAME)).getLocal(); @@ -423,7 +423,7 @@ public void testResolveWildcardsMinusExpandWilcardsOpenAndClosed() { assertThat(request.indices(), arrayContainingInAnyOrder(replacedIndices)); } - public void testResolveWildcardsExclusionsExpandWilcardsOpenStrict() { + public void testResolveWildcardsExclusionsExpandWildcardsOpenStrict() { SearchRequest request = new SearchRequest("*", "-foofoo*", "barbaz", "foob*"); request.indicesOptions(IndicesOptions.fromOptions(false, true, true, false)); List indices = resolveIndices(request, buildAuthorizedIndices(user, SearchAction.NAME)).getLocal(); @@ -432,7 +432,7 @@ public void testResolveWildcardsExclusionsExpandWilcardsOpenStrict() { assertThat(request.indices(), arrayContainingInAnyOrder("bar", "foobarfoo", "barbaz", "foobarfoo")); } - public void testResolveWildcardsPlusAndMinusExpandWilcardsOpenIgnoreUnavailable() { + public void testResolveWildcardsPlusAndMinusExpandWildcardsOpenIgnoreUnavailable() { SearchRequest request = new SearchRequest("*", "-foofoo*", "+barbaz", "+foob*"); request.indicesOptions(IndicesOptions.fromOptions(true, true, true, false)); List indices = resolveIndices(request, buildAuthorizedIndices(user, SearchAction.NAME)).getLocal(); @@ -443,7 +443,7 @@ public void testResolveWildcardsPlusAndMinusExpandWilcardsOpenIgnoreUnavailable( assertThat(request.indices(), arrayContainingInAnyOrder(replacedIndices)); } - public void 
testResolveWildcardsExclusionExpandWilcardsOpenAndClosedStrict() { + public void testResolveWildcardsExclusionExpandWildcardsOpenAndClosedStrict() { SearchRequest request = new SearchRequest("*", "-foofoo*", "barbaz"); request.indicesOptions(IndicesOptions.fromOptions(false, randomBoolean(), true, true)); List indices = resolveIndices(request, buildAuthorizedIndices(user, SearchAction.NAME)).getLocal(); @@ -452,7 +452,7 @@ public void testResolveWildcardsExclusionExpandWilcardsOpenAndClosedStrict() { assertThat(request.indices(), arrayContainingInAnyOrder(replacedIndices)); } - public void testResolveWildcardsExclusionExpandWilcardsOpenAndClosedIgnoreUnavailable() { + public void testResolveWildcardsExclusionExpandWildcardsOpenAndClosedIgnoreUnavailable() { SearchRequest request = new SearchRequest("*", "-foofoo*", "barbaz"); request.indicesOptions(IndicesOptions.fromOptions(true, randomBoolean(), true, true)); List indices = resolveIndices(request, buildAuthorizedIndices(user, SearchAction.NAME)).getLocal(); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/filter/IPFilterTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/filter/IPFilterTests.java index 78825d95ce078..9b5e2d0e62a4e 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/filter/IPFilterTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/filter/IPFilterTests.java @@ -145,7 +145,7 @@ public void testThatProfilesAreSupported() throws Exception { assertAddressIsDeniedForProfile("client", "192.168.0.2"); } - public void testThatProfilesAreUpdateable() throws Exception { + public void testThatProfilesAreUpdatable() throws Exception { Settings settings = Settings.builder() .put("xpack.security.transport.filter.allow", "localhost") .put("xpack.security.transport.filter.deny", "_all") @@ -199,7 +199,7 @@ public void testThatHttpWorks() throws 
Exception { assertAddressIsDeniedForProfile(IPFilter.HTTP_PROFILE_NAME, "192.168.0.1"); } - public void testThatHttpFallsbackToDefault() throws Exception { + public void testThatHttpFallsBackToDefault() throws Exception { Settings settings = Settings.builder() .put("xpack.security.transport.filter.allow", "127.0.0.1") .put("xpack.security.transport.filter.deny", "10.0.0.0/8") diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ssl/EllipticCurveSSLTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ssl/EllipticCurveSSLTests.java index df49103a25999..48c907d59fde5 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ssl/EllipticCurveSSLTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ssl/EllipticCurveSSLTests.java @@ -46,7 +46,7 @@ protected Settings nodeSettings(int nodeOrdinal) { .put("xpack.ssl.key", keyPath) .put("xpack.ssl.certificate", certPath) .put("xpack.ssl.certificate_authorities", certPath) - .put("xpack.ssl.verification_mode", "certificate") // disable hostname verificate since these certs aren't setup for that + .put("xpack.ssl.verification_mode", "certificate") // disable hostname verification since these certs aren't set up for that .build(); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLReloadIntegTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLReloadIntegTests.java index 2928353269823..b3c1cb85042b8 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLReloadIntegTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLReloadIntegTests.java @@ -39,7 +39,7 @@ public class SSLReloadIntegTests extends SecurityIntegTestCase { private Path nodeKeyPath; private Path nodeCertPath; private Path clientCertPath; - private Path updateableCertPath; + private Path
updatableCertPath; @Override public Settings nodeSettings(int nodeOrdinal) { @@ -62,9 +62,9 @@ public Settings nodeSettings(int nodeOrdinal) { Files.copy(origClientCertPath, clientCertPath); } // Placeholder trusted certificate that will be updated later on - if (updateableCertPath == null) { - updateableCertPath = tempDir.resolve("updateable.crt"); - Files.copy(origCertPath, updateableCertPath); + if (updatableCertPath == null) { + updatableCertPath = tempDir.resolve("updatable.crt"); + Files.copy(origCertPath, updatableCertPath); } } catch (IOException e) { throw new ElasticsearchException("failed to copy key or certificate", e); @@ -79,7 +79,7 @@ public Settings nodeSettings(int nodeOrdinal) { .put("xpack.ssl.key_passphrase", "testnode") .put("xpack.ssl.certificate", nodeCertPath) .putList("xpack.ssl.certificate_authorities", Arrays.asList(nodeCertPath.toString(), clientCertPath.toString(), - updateableCertPath.toString())) + updatableCertPath.toString())) .put("resource.reload.interval.high", "1s"); return builder.build(); @@ -102,7 +102,7 @@ public void testThatSSLConfigurationReloadsOnModification() throws Exception { .put("xpack.ssl.key", keyPath) .put("xpack.ssl.certificate", certPath) .putList("xpack.ssl.certificate_authorities", Arrays.asList(nodeCertPath.toString(), clientCertPath.toString(), - updateableCertPath.toString())) + updatableCertPath.toString())) .setSecureSettings(secureSettings) .build(); String node = randomFrom(internalCluster().getNodeNames()); @@ -119,11 +119,11 @@ public void testThatSSLConfigurationReloadsOnModification() throws Exception { } catch (SSLException | SocketException expected) { logger.trace("expected exception", expected); } - // Copy testnode_updated.crt to the placeholder updateable.crt so that the nodes will start trusting it now + // Copy testnode_updated.crt to the placeholder updatable.crt so that the nodes will start trusting it now try { - Files.move(certPath, updateableCertPath, 
StandardCopyOption.REPLACE_EXISTING, StandardCopyOption.ATOMIC_MOVE); + Files.move(certPath, updatableCertPath, StandardCopyOption.REPLACE_EXISTING, StandardCopyOption.ATOMIC_MOVE); } catch (AtomicMoveNotSupportedException e) { - Files.move(certPath, updateableCertPath, StandardCopyOption.REPLACE_EXISTING); + Files.move(certPath, updatableCertPath, StandardCopyOption.REPLACE_EXISTING); } CountDownLatch latch = new CountDownLatch(1); assertBusy(() -> { diff --git a/x-pack/plugin/sql/jdbc/build.gradle b/x-pack/plugin/sql/jdbc/build.gradle index a077b4ac7ba10..0532ed483fae8 100644 --- a/x-pack/plugin/sql/jdbc/build.gradle +++ b/x-pack/plugin/sql/jdbc/build.gradle @@ -49,7 +49,7 @@ shadowJar { configurations = [project.configurations.runtime] } -// We need a no-depenencies jar though for qa testing so it doesn't conflict with cli +// We need a no-dependencies jar though for qa testing so it doesn't conflict with cli configurations { nodeps } diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcDatabaseMetaData.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcDatabaseMetaData.java index 2650e5892d59b..22ec5a23db738 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcDatabaseMetaData.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcDatabaseMetaData.java @@ -840,7 +840,7 @@ public ResultSet getTypeInfo() throws SQLException { @Override public ResultSet getIndexInfo(String catalog, String schema, String table, boolean unique, boolean approximate) throws SQLException { - throw new SQLFeatureNotSupportedException("Indicies not supported"); + throw new SQLFeatureNotSupportedException("Indices not supported"); } @Override diff --git a/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/JdbcConfigurationTests.java b/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/JdbcConfigurationTests.java index 
5f0f523fb009f..f2ed709f3e573 100644 --- a/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/JdbcConfigurationTests.java +++ b/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/JdbcConfigurationTests.java @@ -107,7 +107,7 @@ public void testHttpWithSSLDisabledFromPropertyAndEnabledFromProtocol() throws E assertEquals("Cannot enable SSL: HTTPS protocol being used in the URL and SSL disabled in properties", e.getMessage()); } - public void testTimoutOverride() throws Exception { + public void testTimeoutOverride() throws Exception { Properties properties = new Properties(); properties.setProperty(CONNECT_TIMEOUT, "3"); // Should be overridden properties.setProperty(PAGE_TIMEOUT, "4"); diff --git a/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/SqlQueryParameterAnalyzerTests.java b/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/SqlQueryParameterAnalyzerTests.java index 019073b1b3b39..5c71f5e56c78b 100644 --- a/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/SqlQueryParameterAnalyzerTests.java +++ b/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/SqlQueryParameterAnalyzerTests.java @@ -54,7 +54,7 @@ public void testUnclosedMultilineComment() { assertEquals("Cannot parse given sql; unclosed /* comment", exception.getMessage()); } - public void testUnclosedSingleQuoteStrign() { + public void testUnclosedSingleQuoteString() { SQLException exception = expectThrows(SQLException.class, () -> SqlQueryParameterAnalyzer.parametersCount("SELECT ' '' '' ")); assertEquals("Cannot parse given sql; unclosed string", exception.getMessage()); } diff --git a/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/SqlSecurityTestCase.java b/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/SqlSecurityTestCase.java index 1dc765e242a2e..72b7600024237 100644 --- 
a/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/SqlSecurityTestCase.java +++ b/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/SqlSecurityTestCase.java @@ -377,7 +377,7 @@ public void testShowTablesWithLimitedAccess() throws Exception { .assertLogs(); } - public void testShowTablesWithLimitedAccessUnaccessableIndex() throws Exception { + public void testShowTablesWithLimitedAccessUnaccessibleIndex() throws Exception { createUser("read_bort", "read_bort"); actions.expectMatchesAdmin("SHOW TABLES LIKE 'not-created'", "read_bort", "SHOW TABLES LIKE 'test'"); diff --git a/x-pack/plugin/sql/qa/security/with-ssl/build.gradle b/x-pack/plugin/sql/qa/security/with-ssl/build.gradle index cfc04f97188a4..82accbf386a94 100644 --- a/x-pack/plugin/sql/qa/security/with-ssl/build.gradle +++ b/x-pack/plugin/sql/qa/security/with-ssl/build.gradle @@ -206,7 +206,7 @@ integTestCluster { } } Closure notRunningFips = { - Boolean.parseBoolean(BuildPlugin.runJavascript(project, project.runtimeJavaHome, + Boolean.parseBoolean(BuildPlugin.runJavaScript(project, project.runtimeJavaHome, 'print(java.security.Security.getProviders()[0].name.toLowerCase().contains("fips"));')) == false } diff --git a/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcResultSetIT.java b/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcResultSetIT.java index 658911fb8d4d3..44bc655de4b5c 100644 --- a/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcResultSetIT.java +++ b/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcResultSetIT.java @@ -10,7 +10,7 @@ /* * Integration testing class for "no security" (cluster running without the Security plugin, - * or the Security is disbled) scenario. Runs all tests in the base class. + * or the Security is disabled) scenario. 
Runs all tests in the base class. */ public class JdbcResultSetIT extends ResultSetTestCase { } diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcAssert.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcAssert.java index 2817ab6df729e..2af3bcb612f1e 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcAssert.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcAssert.java @@ -73,7 +73,7 @@ public static void assertResultSets(ResultSet expected, ResultSet actual, Logger * This means promoting integer types to long and floating types to double and comparing their values. * For example in a non-lenient, strict case a comparison between an int and a tinyint would fail, with lenientDataType it will succeed * as long as the actual value is the same. - * Also, has the option of treating the numeric results for floating point numbers in a leninent way, if chosen to. Usually, + * Also, has the option of treating the numeric results for floating point numbers in a lenient way, if chosen to. Usually, * we would want lenient treatment for floating point numbers in sql-spec tests where the comparison is being made with H2. */ public static void assertResultSets(ResultSet expected, ResultSet actual, Logger logger, boolean lenientDataType, diff --git a/x-pack/plugin/sql/sql-cli/build.gradle b/x-pack/plugin/sql/sql-cli/build.gradle index dbeeda5ee2d3c..b8d1629b82748 100644 --- a/x-pack/plugin/sql/sql-cli/build.gradle +++ b/x-pack/plugin/sql/sql-cli/build.gradle @@ -43,7 +43,7 @@ dependencyLicenses { /* * Bundle all dependencies into the main jar and mark it as executable it - * can be easilly shipped around and used. + * can be easily shipped around and used. 
*/ jar { from({ diff --git a/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/ConnectionConfiguration.java b/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/ConnectionConfiguration.java index c50d1da820edb..6096f5baf865d 100644 --- a/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/ConnectionConfiguration.java +++ b/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/ConnectionConfiguration.java @@ -161,7 +161,7 @@ private static String isKnownProperty(String propertyName, Collection kn if (knownOptions.contains(propertyName)) { return null; } - return "Unknown parameter [" + propertyName + "] ; did you mean " + StringUtils.findSimiliar(propertyName, knownOptions); + return "Unknown parameter [" + propertyName + "] ; did you mean " + StringUtils.findSimilar(propertyName, knownOptions); } protected T parseValue(String key, String value, Function parser) { diff --git a/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/RemoteFailure.java b/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/RemoteFailure.java index 61e62c390ec11..dfb3250d566c4 100644 --- a/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/RemoteFailure.java +++ b/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/RemoteFailure.java @@ -132,7 +132,7 @@ public Map> metadata() { } /** - * Cause of the remote failure. Mostly just useful for dbuegging errors that happen to be bugs. + * Cause of the remote failure. Mostly just useful for debugging errors that happen to be bugs. 
*/ public RemoteFailure cause() { return cause; diff --git a/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/StringUtils.java b/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/StringUtils.java index e4e5bf4d98517..1539302242b27 100644 --- a/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/StringUtils.java +++ b/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/StringUtils.java @@ -262,7 +262,7 @@ else if (m == 0) { return -1; } - public static List findSimiliar(CharSequence match, Collection potential) { + public static List findSimilar(CharSequence match, Collection potential) { List list = new ArrayList(3); // 1 switches or 1 extra char diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Analyzer.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Analyzer.java index 2b1aa42277ea1..af6bf4e572937 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Analyzer.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Analyzer.java @@ -485,7 +485,7 @@ private LogicalPlan dedupRight(LogicalPlan left, LogicalPlan right) { + " and right " + right.nodeString()); } - throw new UnsupportedOperationException("don't know how to resolve conficting IDs yet"); + throw new UnsupportedOperationException("don't know how to resolve conflicting IDs yet"); } } @@ -612,7 +612,7 @@ protected LogicalPlan rule(LogicalPlan plan) { .collect(toList())); - AttributeSet missing = resolvedRefs.substract(o.child().outputSet()); + AttributeSet missing = resolvedRefs.subtract(o.child().outputSet()); if (!missing.isEmpty()) { // Add missing attributes but project them away afterwards @@ -648,7 +648,7 @@ protected LogicalPlan rule(LogicalPlan plan) { .filter(Expression::resolved) .collect(toList())); - AttributeSet missing = 
resolvedRefs.substract(f.child().outputSet()); + AttributeSet missing = resolvedRefs.subtract(f.child().outputSet()); if (!missing.isEmpty()) { // Again, add missing attributes and project them away @@ -695,7 +695,7 @@ private static LogicalPlan propagateMissing(LogicalPlan plan, AttributeSet missi if (plan instanceof Project) { Project p = (Project) plan; - AttributeSet diff = missing.substract(p.child().outputSet()); + AttributeSet diff = missing.subtract(p.child().outputSet()); return new Project(p.location(), propagateMissing(p.child(), diff, failed), combine(p.projections(), missing)); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Verifier.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Verifier.java index 189509e95114c..f1043b2196703 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Verifier.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Verifier.java @@ -169,7 +169,7 @@ Collection verify(LogicalPlan plan) { if (!ua.customMessage()) { boolean useQualifier = ua.qualifier() != null; List potentialMatches = new ArrayList<>(); - for (Attribute a : p.intputSet()) { + for (Attribute a : p.inputSet()) { String nameCandidate = useQualifier ? 
a.qualifiedName() : a.name(); // add only primitives (object types would only result in another error) if ((a.dataType() != DataType.UNSUPPORTED) && a.dataType().isPrimitive()) { diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolver.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolver.java index 8d7d6b5bbee43..5e425a05614d0 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolver.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolver.java @@ -64,7 +64,7 @@ public enum IndexType { INDEX("BASE TABLE"), ALIAS("ALIAS"), // value for user types unrecognized - UNKNOWN("UKNOWN"); + UNKNOWN("UNKNOWN"); public static final EnumSet VALID = EnumSet.of(INDEX, ALIAS); @@ -369,22 +369,22 @@ private static EsField createField(String fieldName, Map props, boolean isAggregateable) { + private static EsField createField(String fieldName, String typeName, Map props, boolean isAggregatable) { DataType esType = DataType.fromTypeName(typeName); switch (esType) { case TEXT: return new TextEsField(fieldName, props, false); case KEYWORD: int length = DataType.KEYWORD.defaultPrecision; - // TODO: to check whether isSearchable/isAggregateable takes into account the presence of the normalizer + // TODO: to check whether isSearchable/isAggregatable takes into account the presence of the normalizer boolean normalized = false; - return new KeywordEsField(fieldName, props, isAggregateable, length, normalized); + return new KeywordEsField(fieldName, props, isAggregatable, length, normalized); case DATE: - return new DateEsField(fieldName, props, isAggregateable); + return new DateEsField(fieldName, props, isAggregatable); case UNSUPPORTED: return new UnsupportedEsField(fieldName, typeName); default: - return new EsField(fieldName, esType, props, isAggregateable); + return new EsField(fieldName, esType, props, 
isAggregatable); } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/AttributeMap.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/AttributeMap.java index 57dc8f6152e99..b5d137617722e 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/AttributeMap.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/AttributeMap.java @@ -175,7 +175,7 @@ void addAll(AttributeMap other) { delegate.putAll(other.delegate); } - public AttributeMap substract(AttributeMap other) { + public AttributeMap subtract(AttributeMap other) { AttributeMap diff = new AttributeMap<>(); for (Entry entry : this.delegate.entrySet()) { if (!other.delegate.containsKey(entry.getKey())) { diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/AttributeSet.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/AttributeSet.java index 5d4065e5f3654..af290371dafd3 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/AttributeSet.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/AttributeSet.java @@ -57,8 +57,8 @@ void addAll(AttributeSet other) { delegate.addAll(other.delegate); } - public AttributeSet substract(AttributeSet other) { - return new AttributeSet(delegate.substract(other.delegate)); + public AttributeSet subtract(AttributeSet other) { + return new AttributeSet(delegate.subtract(other.delegate)); } public AttributeSet intersect(AttributeSet other) { diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/UnresolvedFunction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/UnresolvedFunction.java index fa1be78a594fb..bf394eea388c8 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/UnresolvedFunction.java +++ 
b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/UnresolvedFunction.java @@ -258,7 +258,7 @@ protected String type() { * Preprocess a function that contains a star to some other * form before attempting to resolve it. For example, * {@code DISTINCT} doesn't support {@code *} so it converts - * this function into a dead end, unresolveable function. + * this function into a dead end, unresolvable function. * Or {@code COUNT(*)} can be rewritten to {@code COUNT(1)} * so we don't have to resolve {@code *}. */ diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Avg.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Avg.java index 0dfed0d5c0057..6c3bea1310d7b 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Avg.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Avg.java @@ -13,7 +13,7 @@ import java.util.List; /** - * Find the arithmatic mean of a field. + * Find the arithmetic mean of a field. 
*/ public class Avg extends NumericAggregate implements EnclosedAgg { diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeHistogramFunction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeHistogramFunction.java index 0a59c4d52eaf5..4704358cc106c 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeHistogramFunction.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeHistogramFunction.java @@ -22,7 +22,7 @@ public abstract class DateTimeHistogramFunction extends DateTimeFunction { } /** - * used for aggregration (date histogram) + * used for aggregation (date histogram) */ public abstract long interval(); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/MinuteOfHour.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/MinuteOfHour.java index 0a49bb042f97b..ffb3d37ac534e 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/MinuteOfHour.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/MinuteOfHour.java @@ -13,7 +13,7 @@ import java.time.ZoneId; /** - * Exract the minute of the hour from a datetime. + * Extract the minute of the hour from a datetime. 
*/ public class MinuteOfHour extends DateTimeFunction { public MinuteOfHour(Location location, Expression field, ZoneId zoneId) { diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Round.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Round.java index 05244c2a74e95..cba97248798fa 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Round.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Round.java @@ -15,7 +15,7 @@ /** * Function that takes two parameters: one is the field/value itself, the other is a non-floating point numeric * which indicates how the rounding should behave. If positive, it will round the number till that parameter - * count digits after the decimal point. If negative, it will round the number till that paramter count + * count digits after the decimal point. If negative, it will round the number till that parameter count * digits before the decimal point, starting at the decimal point. */ public class Round extends BinaryNumericFunction { diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Sin.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Sin.java index e61ba739e5238..d4b5709d60292 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Sin.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Sin.java @@ -12,7 +12,7 @@ /** * Sine - * fuction. + * function. 
*/ public class Sin extends MathFunction { public Sin(Location location, Expression field) { diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/Range.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/Range.java index 42021cad5901e..402b49fe59dd9 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/Range.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/Range.java @@ -103,9 +103,9 @@ public Object fold() { Object val = value.fold(); Integer lowerCompare = BinaryComparison.compare(lower.fold(), val); Integer upperCompare = BinaryComparison.compare(val, upper().fold()); - boolean lowerComparsion = lowerCompare == null ? false : (includeLower ? lowerCompare <= 0 : lowerCompare < 0); - boolean upperComparsion = upperCompare == null ? false : (includeUpper ? upperCompare <= 0 : upperCompare < 0); - return lowerComparsion && upperComparsion; + boolean lowerComparison = lowerCompare == null ? false : (includeLower ? lowerCompare <= 0 : lowerCompare < 0); + boolean upperComparison = upperCompare == null ? false : (includeUpper ? 
upperCompare <= 0 : upperCompare < 0); + return lowerComparison && upperComparison; } /** diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/BinaryArithmeticProcessor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/BinaryArithmeticProcessor.java index 1b7afa203077d..a0fd57e30d0ca 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/BinaryArithmeticProcessor.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/BinaryArithmeticProcessor.java @@ -76,7 +76,7 @@ public enum BinaryArithmeticOperation implements PredicateBiFunction) { - throw new SqlIllegalArgumentException("Cannot substract a date from an interval; do you mean the reverse?"); + throw new SqlIllegalArgumentException("Cannot subtract a date from an interval; do you mean the reverse?"); } throw new SqlIllegalArgumentException("Cannot compute [-] between [{}] [{}]", l.getClass().getSimpleName(), diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/comparison/InProcessor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/comparison/InProcessor.java index 82233e250e364..f838892035545 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/comparison/InProcessor.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/comparison/InProcessor.java @@ -18,14 +18,14 @@ public class InProcessor implements Processor { public static final String NAME = "in"; - private final List processsors; + private final List processors; InProcessor(List processors) { - this.processsors = processors; + this.processors = processors; } public InProcessor(StreamInput in) throws IOException { - processsors = 
in.readNamedWriteableList(Processor.class); + processors = in.readNamedWriteableList(Processor.class); } @Override @@ -35,13 +35,13 @@ public String getWriteableName() { @Override public final void writeTo(StreamOutput out) throws IOException { - out.writeNamedWriteableList(processsors); + out.writeNamedWriteableList(processors); } @Override public Object process(Object input) { - Object leftValue = processsors.get(processsors.size() - 1).process(input); - return apply(leftValue, Processors.process(processsors.subList(0, processsors.size() - 1), leftValue)); + Object leftValue = processors.get(processors.size() - 1).process(input); + return apply(leftValue, Processors.process(processors.subList(0, processors.size() - 1), leftValue)); } public static Boolean apply(Object input, List values) { @@ -62,11 +62,11 @@ public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; InProcessor that = (InProcessor) o; - return Objects.equals(processsors, that.processsors); + return Objects.equals(processors, that.processors); } @Override public int hashCode() { - return Objects.hash(processsors); + return Objects.hash(processors); } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/QueryPlan.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/QueryPlan.java index e56aa7819fc22..3b617525864cc 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/QueryPlan.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/QueryPlan.java @@ -41,7 +41,7 @@ public AttributeSet outputSet() { return lazyOutputSet; } - public AttributeSet intputSet() { + public AttributeSet inputSet() { if (lazyInputSet == null) { List attrs = new ArrayList<>(); for (PlanType child : children()) { diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryTranslator.java 
b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryTranslator.java index 4f071ee50f4f1..f247f3c823047 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryTranslator.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryTranslator.java @@ -297,7 +297,7 @@ else if (exp instanceof GroupingFunction) { } } else { - throw new SqlIllegalArgumentException("Unsupproted grouping function {}", exp); + throw new SqlIllegalArgumentException("Unsupported grouping function {}", exp); } } // bumped into into an invalid function (which should be caught by the verifier) diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/RestSqlQueryAction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/RestSqlQueryAction.java index d4d4ce16fb8ab..1d762dd3b0f82 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/RestSqlQueryAction.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/RestSqlQueryAction.java @@ -63,10 +63,10 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli * {@link XContent} outputs we can't use {@link RestToXContentListener} * like everything else. We want to stick as closely as possible to * Elasticsearch's defaults though, while still layering in ways to - * control the output more easilly. + * control the output more easily. * * First we find the string that the user used to specify the response - * format. If there is a {@code format} paramter we use that. If there + * format. If there is a {@code format} parameter we use that. If there * isn't but there is a {@code Accept} header then we use that. If there * isn't then we use the {@code Content-Type} header which is required. 
*/ diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/Cursors.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/Cursors.java index f8d0393303d64..6c1b3ef343a85 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/Cursors.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/Cursors.java @@ -74,7 +74,7 @@ public static String encodeToString(Version version, Cursor info) { } return os.toString(StandardCharsets.UTF_8.name()); } catch (Exception ex) { - throw new SqlIllegalArgumentException("Unexpected failure retriving next page", ex); + throw new SqlIllegalArgumentException("Unexpected failure retrieving next page", ex); } } diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java index 5a786441d3300..00c6f4b30595e 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java @@ -384,7 +384,7 @@ public void testInvalidTypeForNumericFunction_WithTwoArgs() { error("SELECT TRUNCATE(1.2, 'bar')")); } - public void testInvalidTypeForBooleanFuntion_WithTwoArgs() { + public void testInvalidTypeForBooleanFunction_WithTwoArgs() { assertEquals("1:8: [OR] first argument must be [boolean], found value [1] type [integer]", error("SELECT 1 OR true")); assertEquals("1:8: [OR] second argument must be [boolean], found value [2] type [integer]", @@ -455,10 +455,10 @@ public void testConditionalWithDifferentDataTypes_SelectClause() { error("SELECT 1 = 1 OR " + function + "(3, '4') > 1")); @SuppressWarnings("unchecked") - String arbirtraryArgsfunction = randomFrom(Coalesce.class, Greatest.class, Least.class).getSimpleName(); - 
assertEquals("1:" + (34 + arbirtraryArgsfunction.length()) + + String arbitraryArgsfunction = randomFrom(Coalesce.class, Greatest.class, Least.class).getSimpleName(); + assertEquals("1:" + (34 + arbitraryArgsfunction.length()) + ": expected data type [INTEGER], value provided is of type [KEYWORD]", - error("SELECT 1 = 1 OR " + arbirtraryArgsfunction + "(null, null, 3, '4') > 1")); + error("SELECT 1 = 1 OR " + arbitraryArgsfunction + "(null, null, 3, '4') > 1")); } public void testConditionalWithDifferentDataTypes_WhereClause() { @@ -469,10 +469,10 @@ public void testConditionalWithDifferentDataTypes_WhereClause() { error("SELECT * FROM test WHERE " + function + "('foo', 4) > 1")); @SuppressWarnings("unchecked") - String arbirtraryArgsfunction = randomFrom(Coalesce.class, Greatest.class, Least.class).getSimpleName(); - assertEquals("1:" + (46 + arbirtraryArgsfunction.length()) + + String arbitraryArgsfunction = randomFrom(Coalesce.class, Greatest.class, Least.class).getSimpleName(); + assertEquals("1:" + (46 + arbitraryArgsfunction.length()) + ": expected data type [KEYWORD], value provided is of type [INTEGER]", - error("SELECT * FROM test WHERE " + arbirtraryArgsfunction + "(null, null, 'foo', 4) > 1")); + error("SELECT * FROM test WHERE " + arbitraryArgsfunction + "(null, null, 'foo', 4) > 1")); } public void testAggsInWhere() { diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolverTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolverTests.java index 7ef57972b3131..a61c27fb8489a 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolverTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolverTests.java @@ -153,7 +153,7 @@ public static Map> fromMappings(EsIndex.. 
if (entry.getValue().size() > 1) { for (EsIndex index : indices) { EsField field = index.mapping().get(fieldName); - UpdateableFieldCapabilities fieldCaps = (UpdateableFieldCapabilities) caps.get(field.getDataType().esType); + UpdatableFieldCapabilities fieldCaps = (UpdatableFieldCapabilities) caps.get(field.getDataType().esType); fieldCaps.indices.add(index.name()); } //TODO: what about nonAgg/SearchIndices? @@ -171,12 +171,12 @@ private static void addFieldCaps(String parent, EsField field, String indexName, merged.put(fieldName, map); } FieldCapabilities caps = map.computeIfAbsent(field.getDataType().esType, - esType -> new UpdateableFieldCapabilities(fieldName, esType, + esType -> new UpdatableFieldCapabilities(fieldName, esType, isSearchable(field.getDataType()), isAggregatable(field.getDataType()))); if (!field.isAggregatable()) { - ((UpdateableFieldCapabilities) caps).nonAggregatableIndices.add(indexName); + ((UpdatableFieldCapabilities) caps).nonAggregatableIndices.add(indexName); } for (EsField nested : field.getProperties().values()) { @@ -192,12 +192,12 @@ private static boolean isAggregatable(DataType type) { return type.isNumeric() || type == DataType.KEYWORD || type == DataType.DATE; } - private static class UpdateableFieldCapabilities extends FieldCapabilities { + private static class UpdatableFieldCapabilities extends FieldCapabilities { List indices = new ArrayList<>(); List nonSearchableIndices = new ArrayList<>(); List nonAggregatableIndices = new ArrayList<>(); - UpdateableFieldCapabilities(String name, String type, boolean isSearchable, boolean isAggregatable) { + UpdatableFieldCapabilities(String name, String type, boolean isSearchable, boolean isAggregatable) { super(name, type, isSearchable, isAggregatable); } diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/AttributeMapTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/AttributeMapTests.java index 
fa85ca9cbff12..c80509cc06970 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/AttributeMapTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/AttributeMapTests.java @@ -79,19 +79,19 @@ public void testSingleItemConstructor() { assertThat(m.containsValue("on"), is(false)); } - public void testSubstract() { + public void testSubtract() { AttributeMap m = threeMap(); AttributeMap mo = new AttributeMap<>(m.keySet().iterator().next(), "one"); AttributeMap empty = new AttributeMap<>(); - assertThat(m.substract(empty), is(m)); - assertThat(m.substract(m), is(empty)); - assertThat(mo.substract(m), is(empty)); + assertThat(m.subtract(empty), is(m)); + assertThat(m.subtract(m), is(empty)); + assertThat(mo.subtract(m), is(empty)); - AttributeMap substract = m.substract(mo); + AttributeMap subtract = m.subtract(mo); - assertThat(substract.size(), is(2)); - assertThat(substract.attributeNames(), contains("two", "three")); + assertThat(subtract.size(), is(2)); + assertThat(subtract.attributeNames(), contains("two", "three")); } public void testIntersect() { diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/StringFunctionProcessorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/StringFunctionProcessorTests.java index b43f1bae51a84..585d8afa21359 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/StringFunctionProcessorTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/StringFunctionProcessorTests.java @@ -210,7 +210,7 @@ public void testOctetLength() { assertEquals(0, proc.process("")); assertEquals(1, proc.process('f')); assertEquals(3, proc.process('\u20ac')); // euro symbol - // euro (3), lamda (2), theta (2), 'white sun with rays' (3), math 'A' (4) symbols + // euro (3), lambda 
(2), theta (2), 'white sun with rays' (3), math 'A' (4) symbols assertEquals(14, proc.process("\u20ac\u039B\u03F4\u263C\u1D400")); stringCharInputValidation(proc); diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/gen/pipeline/BinaryPipesTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/gen/pipeline/BinaryPipesTests.java index 991036d2da3ac..f0a21e6fe83d5 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/gen/pipeline/BinaryPipesTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/gen/pipeline/BinaryPipesTests.java @@ -69,7 +69,7 @@ public void collectFields(SqlSourceBuilder sourceBuilder) { } /** - * Returns {@code true} if the processor defintion builds a query that + * Returns {@code true} if the processor definition builds a query that * tracks scores, {@code false} otherwise. Used for testing * {@link Pipe#collectFields(SqlSourceBuilder)}. */ diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/BinaryArithmeticTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/BinaryArithmeticTests.java index 2618392a067bd..ab1556db76d05 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/BinaryArithmeticTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/BinaryArithmeticTests.java @@ -139,7 +139,7 @@ public void testSubYearMonthIntervalToDateIllegal() { TemporalAmount t = Period.ofYears(100).plusMonths(50); Literal r = interval(t, INTERVAL_HOUR); SqlIllegalArgumentException ex = expectThrows(SqlIllegalArgumentException.class, () -> sub(r, l)); - assertEquals("Cannot substract a date from an interval; do you mean the reverse?", ex.getMessage()); + assertEquals("Cannot subtract a date from an 
interval; do you mean the reverse?", ex.getMessage()); } public void testSubNumberFromIntervalIllegal() { diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/tree/NodeSubclassTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/tree/NodeSubclassTests.java index a8145d9f3bf58..27e71fee215f0 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/tree/NodeSubclassTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/tree/NodeSubclassTests.java @@ -74,8 +74,8 @@ * and any other interesting things that that they do but we're a * long way from that and this gets the job done for now. *

    - * This test attempts to use reflection to create believeable nodes - * and manipulate them in believeable ways with as little knowledge + * This test attempts to use reflection to create believable nodes + * and manipulate them in believable ways with as little knowledge * of the actual subclasses as possible. This is problematic because * it is possible, for example, for nodes to stackoverflow because * they can contain themselves. So this class diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java index 3ea99e5787fe0..e308c06c7da36 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java @@ -132,7 +132,7 @@ import org.elasticsearch.xpack.watcher.notification.email.attachment.DataAttachmentParser; import org.elasticsearch.xpack.watcher.notification.email.attachment.EmailAttachmentParser; import org.elasticsearch.xpack.watcher.notification.email.attachment.EmailAttachmentsParser; -import org.elasticsearch.xpack.watcher.notification.email.attachment.HttpEmailAttachementParser; +import org.elasticsearch.xpack.watcher.notification.email.attachment.HttpEmailAttachmentParser; import org.elasticsearch.xpack.watcher.notification.email.attachment.ReportingAttachmentParser; import org.elasticsearch.xpack.watcher.notification.email.support.BodyPartSource; import org.elasticsearch.xpack.watcher.notification.hipchat.HipChatService; @@ -290,7 +290,7 @@ public Collection createComponents(Client client, ClusterService cluster TextTemplateEngine templateEngine = new TextTemplateEngine(scriptService); Map emailAttachmentParsers = new HashMap<>(); - emailAttachmentParsers.put(HttpEmailAttachementParser.TYPE, new HttpEmailAttachementParser(httpClient, templateEngine)); + emailAttachmentParsers.put(HttpEmailAttachmentParser.TYPE, new 
HttpEmailAttachmentParser(httpClient, templateEngine)); emailAttachmentParsers.put(DataAttachmentParser.TYPE, new DataAttachmentParser()); emailAttachmentParsers.put(ReportingAttachmentParser.TYPE, new ReportingAttachmentParser(settings, httpClient, templateEngine)); EmailAttachmentsParser emailAttachmentsParser = new EmailAttachmentsParser(emailAttachmentParsers); diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/Account.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/Account.java index 02c0e1167dd95..631d11196a839 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/Account.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/Account.java @@ -259,7 +259,7 @@ static Properties loadSmtpProperties(Settings settings) { settings = builder.build(); Properties props = new Properties(); - // Secure strings can not be retreived out of a settings object and should be handled differently + // Secure strings can not be retrieved out of a settings object and should be handled differently Set insecureSettings = settings.filter(s -> s.startsWith("secure_") == false).keySet(); for (String key : insecureSettings) { props.setProperty(SMTP_SETTINGS_PREFIX + key, settings.get(key)); diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/HtmlSanitizer.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/HtmlSanitizer.java index c7be7e6db0f30..2b4bc10bc6958 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/HtmlSanitizer.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/HtmlSanitizer.java @@ -173,7 +173,7 @@ static PolicyFactory createCommonPolicy(List allow, List disallo /** * An {@code img} tag policy that only accept 
{@code cid:} values in its {@code src} attribute. - * If such value is found, the content id is verified against the available attachements of the + * If such value is found, the content id is verified against the available attachments of the * email and if the content/attachment is not found, the element is dropped. */ private static class EmbeddedImgOnlyPolicy implements ElementPolicy { diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/attachment/HttpEmailAttachementParser.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/attachment/HttpEmailAttachmentParser.java similarity index 96% rename from x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/attachment/HttpEmailAttachementParser.java rename to x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/attachment/HttpEmailAttachmentParser.java index 076c57c832fb4..9c5389880a7c4 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/attachment/HttpEmailAttachementParser.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/attachment/HttpEmailAttachmentParser.java @@ -24,7 +24,7 @@ import java.io.IOException; import java.util.Map; -public class HttpEmailAttachementParser implements EmailAttachmentParser { +public class HttpEmailAttachmentParser implements EmailAttachmentParser { public interface Fields { ParseField INLINE = new ParseField("inline"); @@ -36,7 +36,7 @@ public interface Fields { private final HttpClient httpClient; private final TextTemplateEngine templateEngine; - public HttpEmailAttachementParser(HttpClient httpClient, TextTemplateEngine templateEngine) { + public HttpEmailAttachmentParser(HttpClient httpClient, TextTemplateEngine templateEngine) { this.httpClient = httpClient; this.templateEngine = templateEngine; } diff --git 
a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/attachment/HttpRequestAttachment.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/attachment/HttpRequestAttachment.java index 44e65f49ad476..6d29bd9e29353 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/attachment/HttpRequestAttachment.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/attachment/HttpRequestAttachment.java @@ -48,13 +48,13 @@ public boolean inline() { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(id) - .startObject(HttpEmailAttachementParser.TYPE) - .field(HttpEmailAttachementParser.Fields.REQUEST.getPreferredName(), requestTemplate, params); + .startObject(HttpEmailAttachmentParser.TYPE) + .field(HttpEmailAttachmentParser.Fields.REQUEST.getPreferredName(), requestTemplate, params); if (Strings.hasLength(contentType)) { - builder.field(HttpEmailAttachementParser.Fields.CONTENT_TYPE.getPreferredName(), contentType); + builder.field(HttpEmailAttachmentParser.Fields.CONTENT_TYPE.getPreferredName(), contentType); } if (inline) { - builder.field(HttpEmailAttachementParser.Fields.INLINE.getPreferredName(), inline); + builder.field(HttpEmailAttachmentParser.Fields.INLINE.getPreferredName(), inline); } return builder.endObject().endObject(); } @@ -65,7 +65,7 @@ public static Builder builder(String id) { @Override public String type() { - return HttpEmailAttachementParser.TYPE; + return HttpEmailAttachmentParser.TYPE; } @Override diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/attachment/ReportingAttachmentParser.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/attachment/ReportingAttachmentParser.java index d0d9ecc78104c..76678b47b261f 100644 --- 
a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/attachment/ReportingAttachmentParser.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/attachment/ReportingAttachmentParser.java @@ -197,7 +197,7 @@ private HttpResponse requestReportGeneration(String watchId, String attachmentId * Extract the id from JSON payload, so we know which ID to poll for */ private String extractIdFromJson(String watchId, String attachmentId, BytesReference body) throws IOException { - // EMPTY is safe here becaus we never call namedObject + // EMPTY is safe here because we never call namedObject try (InputStream stream = body.streamInput(); XContentParser parser = JsonXContent.jsonXContent .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream)) { diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/watch/WatchParser.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/watch/WatchParser.java index ab316c3dd1001..17d7f4ec40ab9 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/watch/WatchParser.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/watch/WatchParser.java @@ -128,7 +128,7 @@ public Watch parse(String id, boolean includeStatus, WatcherXContentParser parse List actions = defaultActions; ExecutableTransform transform = null; TimeValue throttlePeriod = null; - Map metatdata = null; + Map metadata = null; WatchStatus status = null; long version = Versions.MATCH_ANY; @@ -162,7 +162,7 @@ public Watch parse(String id, boolean includeStatus, WatcherXContentParser parse } else if (WatchField.ACTIONS.match(currentFieldName, parser.getDeprecationHandler())) { actions = actionRegistry.parseActions(id, parser); } else if (WatchField.METADATA.match(currentFieldName, parser.getDeprecationHandler())) { - metatdata = parser.map(); + metadata = parser.map(); } else if 
(WatchField.VERSION.match(currentFieldName, parser.getDeprecationHandler())) { version = parser.longValue(); } else if (WatchField.STATUS.match(currentFieldName, parser.getDeprecationHandler())) { @@ -198,6 +198,6 @@ public Watch parse(String id, boolean includeStatus, WatcherXContentParser parse } - return new Watch(id, trigger, input, condition, transform, throttlePeriod, actions, metatdata, status, version); + return new Watch(id, trigger, input, condition, transform, throttlePeriod, actions, metadata, status, version); } } diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleServiceTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleServiceTests.java index 467966e96fdbf..46f4d8dd96d4e 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleServiceTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleServiceTests.java @@ -503,10 +503,10 @@ public void testWatcherReloadsOnNodeOutageWithWatcherShard() { .build(); ShardRouting replicaShardRouting = TestShardRouting.newShardRouting(shardId, localNodeId, false, STARTED); - ShardRouting primartShardRouting = TestShardRouting.newShardRouting(shardId, outageNodeId, true, STARTED); + ShardRouting primaryShardRouting = TestShardRouting.newShardRouting(shardId, outageNodeId, true, STARTED); IndexRoutingTable previousWatchRoutingTable = IndexRoutingTable.builder(watchIndex) .addShard(replicaShardRouting) - .addShard(primartShardRouting) + .addShard(primaryShardRouting) .build(); IndexMetaData indexMetaData = IndexMetaData.builder(Watch.INDEX) diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/email/EmailActionTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/email/EmailActionTests.java index a8cb1f7d2fac1..929e9098b039f 100644 --- 
a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/email/EmailActionTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/email/EmailActionTests.java @@ -41,7 +41,7 @@ import org.elasticsearch.xpack.watcher.notification.email.attachment.EmailAttachmentParser; import org.elasticsearch.xpack.watcher.notification.email.attachment.EmailAttachments; import org.elasticsearch.xpack.watcher.notification.email.attachment.EmailAttachmentsParser; -import org.elasticsearch.xpack.watcher.notification.email.attachment.HttpEmailAttachementParser; +import org.elasticsearch.xpack.watcher.notification.email.attachment.HttpEmailAttachmentParser; import org.elasticsearch.xpack.watcher.notification.email.attachment.HttpRequestAttachment; import org.elasticsearch.xpack.watcher.test.AbstractWatcherIntegrationTestCase; import org.elasticsearch.xpack.watcher.test.MockTextTemplateEngine; @@ -86,7 +86,7 @@ public class EmailActionTests extends ESTestCase { @Before public void addEmailAttachmentParsers() { Map emailAttachmentParsers = new HashMap<>(); - emailAttachmentParsers.put(HttpEmailAttachementParser.TYPE, new HttpEmailAttachementParser(httpClient, + emailAttachmentParsers.put(HttpEmailAttachmentParser.TYPE, new HttpEmailAttachmentParser(httpClient, new MockTextTemplateEngine())); emailAttachmentParsers.put(DataAttachmentParser.TYPE, new DataAttachmentParser()); emailAttachmentParser = new EmailAttachmentsParser(emailAttachmentParsers); @@ -512,7 +512,7 @@ public void testThatOneFailedEmailAttachmentResultsInActionFailure() throws Exce // setup email attachment parsers Map attachmentParsers = new HashMap<>(); - attachmentParsers.put(HttpEmailAttachementParser.TYPE, new HttpEmailAttachementParser(httpClient, engine)); + attachmentParsers.put(HttpEmailAttachmentParser.TYPE, new HttpEmailAttachmentParser(httpClient, engine)); EmailAttachmentsParser emailAttachmentsParser = new EmailAttachmentsParser(attachmentParsers); 
XContentBuilder builder = jsonBuilder().startObject() diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/HtmlSanitizerTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/HtmlSanitizerTests.java index 3ffd8dbf548e6..b9f336a189185 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/HtmlSanitizerTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/HtmlSanitizerTests.java @@ -121,7 +121,7 @@ public void testDefaultFormattingAllowed() { assertThat(sanitizedHtml, equalTo(html)); } - public void testDefaultSciptsDisallowed() { + public void testDefaultScriptsDisallowed() { String html = "This was a dangerous script"; HtmlSanitizer sanitizer = new HtmlSanitizer(Settings.EMPTY); String sanitizedHtml = sanitizer.sanitize(html); diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/attachment/HttpEmailAttachementParserTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/attachment/HttpEmailAttachmentParserTests.java similarity index 90% rename from x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/attachment/HttpEmailAttachementParserTests.java rename to x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/attachment/HttpEmailAttachmentParserTests.java index 2b0e632b0cac7..dd55ed4cefe32 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/attachment/HttpEmailAttachementParserTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/attachment/HttpEmailAttachmentParserTests.java @@ -39,7 +39,7 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; -public class HttpEmailAttachementParserTests extends 
ESTestCase { +public class HttpEmailAttachmentParserTests extends ESTestCase { private HttpClient httpClient; private EmailAttachmentsParser emailAttachmentsParser; @@ -50,8 +50,8 @@ public void init() throws Exception { httpClient = mock(HttpClient.class); attachmentParsers = new HashMap<>(); - attachmentParsers.put(HttpEmailAttachementParser.TYPE, - new HttpEmailAttachementParser(httpClient, new MockTextTemplateEngine())); + attachmentParsers.put(HttpEmailAttachmentParser.TYPE, + new HttpEmailAttachmentParser(httpClient, new MockTextTemplateEngine())); emailAttachmentsParser = new EmailAttachmentsParser(attachmentParsers); } @@ -62,7 +62,7 @@ public void testSerializationWorks() throws Exception { String id = "some-id"; XContentBuilder builder = jsonBuilder().startObject().startObject(id) - .startObject(HttpEmailAttachementParser.TYPE) + .startObject(HttpEmailAttachmentParser.TYPE) .startObject("request") .field("scheme", "http") .field("host", "test.de") @@ -105,7 +105,7 @@ public void testNonOkHttpCodeThrowsException() throws Exception { WatchExecutionContext ctx = createWatchExecutionContext(); ElasticsearchException exception = expectThrows(ElasticsearchException.class, - () -> attachmentParsers.get(HttpEmailAttachementParser.TYPE).toAttachment(ctx, new Payload.Simple(), attachment)); + () -> attachmentParsers.get(HttpEmailAttachmentParser.TYPE).toAttachment(ctx, new Payload.Simple(), attachment)); assertThat(exception.getMessage(), is("Watch[watch1] attachment[someid] HTTP error status host[localhost], port[80], " + "method[GET], path[foo], status[403]")); } @@ -119,7 +119,7 @@ public void testEmptyResponseThrowsException() throws Exception { WatchExecutionContext ctx = createWatchExecutionContext(); ElasticsearchException exception = expectThrows(ElasticsearchException.class, - () -> attachmentParsers.get(HttpEmailAttachementParser.TYPE).toAttachment(ctx, new Payload.Simple(), attachment)); + () -> 
attachmentParsers.get(HttpEmailAttachmentParser.TYPE).toAttachment(ctx, new Payload.Simple(), attachment)); assertThat(exception.getMessage(), is("Watch[watch1] attachment[someid] HTTP empty response body host[localhost], port[80], " + "method[GET], path[foo], status[200]")); } @@ -132,7 +132,7 @@ public void testHttpClientThrowsException() throws Exception { WatchExecutionContext ctx = createWatchExecutionContext(); IOException exception = expectThrows(IOException.class, - () -> attachmentParsers.get(HttpEmailAttachementParser.TYPE).toAttachment(ctx, new Payload.Simple(), attachment)); + () -> attachmentParsers.get(HttpEmailAttachmentParser.TYPE).toAttachment(ctx, new Payload.Simple(), attachment)); assertThat(exception.getMessage(), is("whatever")); } diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/VariablesTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/VariablesTests.java index 5ec2e639b9e89..531c074112d66 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/VariablesTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/VariablesTests.java @@ -33,7 +33,7 @@ public void testCreateCtxModel() throws Exception { DateTime triggeredTime = scheduledTime.plusMillis(50); DateTime executionTime = triggeredTime.plusMillis(50); Payload payload = new Payload.Simple(singletonMap("payload_key", "payload_value")); - Map metatdata = singletonMap("metadata_key", "metadata_value"); + Map metadata = singletonMap("metadata_key", "metadata_value"); TriggerEvent event = new ScheduleTriggerEvent("_watch_id", triggeredTime, scheduledTime); Wid wid = new Wid("_watch_id", executionTime); WatchExecutionContext ctx = WatcherTestUtils.mockExecutionContextBuilder("_watch_id") @@ -41,7 +41,7 @@ public void testCreateCtxModel() throws Exception { .executionTime(executionTime) .triggerEvent(event) .payload(payload) - .metadata(metatdata) + 
.metadata(metadata) .buildMock(); Map model = Variables.createCtxParamsMap(ctx, payload); @@ -57,6 +57,6 @@ public void testCreateCtxModel() throws Exception { assertThat(ObjectPath.eval("ctx.execution_time", model), Matchers.hasToString(jodaJavaExecutionTime.toString())); assertThat(ObjectPath.eval("ctx.trigger", model), is(event.data())); assertThat(ObjectPath.eval("ctx.payload", model), is(payload.data())); - assertThat(ObjectPath.eval("ctx.metadata", model), is(metatdata)); + assertThat(ObjectPath.eval("ctx.metadata", model), is(metadata)); } } diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/WatcherTestUtils.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/WatcherTestUtils.java index bb5a6eabdd5b9..849e1ac9c1b0b 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/WatcherTestUtils.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/WatcherTestUtils.java @@ -160,9 +160,9 @@ public static Watch createTestWatch(String watchName, Client client, HttpClient EmailTemplate email = EmailTemplate.builder().from("from@test.com").to("to@test.com").build(); Authentication auth = new Authentication("testname", new Secret("testpassword".toCharArray())); EmailAction action = new EmailAction(email, "testaccount", auth, Profile.STANDARD, null, null); - ExecutableEmailAction executale = new ExecutableEmailAction(action, logger, emailService, engine, + ExecutableEmailAction executable = new ExecutableEmailAction(action, logger, emailService, engine, new HtmlSanitizer(Settings.EMPTY), Collections.emptyMap()); - actions.add(new ActionWrapper("_email", null, null, null, executale)); + actions.add(new ActionWrapper("_email", null, null, null, executable)); DateTime now = DateTime.now(UTC); Map statuses = new HashMap<>(); diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/BootStrapTests.java 
b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/BootStrapTests.java index 5a5c5c020dacd..7dfde3e3e6cac 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/BootStrapTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/BootStrapTests.java @@ -285,7 +285,7 @@ private void assertSingleExecutionAndCompleteWatchHistory(final long numberOfWat long notExecutedCount = Arrays.stream(historySearchResponse.getHits().getHits()) .filter(hit -> hit.getSourceAsMap().get("state").equals(ExecutionState.NOT_EXECUTED_ALREADY_QUEUED.id())) .count(); - logger.info("Watches not executed: [{}]: expected watch history count [{}] - [{}] successful watch exections", + logger.info("Watches not executed: [{}]: expected watch history count [{}] - [{}] successful watch executions", notExecutedCount, expectedWatchHistoryCount, successfulWatchExecutions); assertThat(notExecutedCount, is(expectedWatchHistoryCount - successfulWatchExecutions)); }, 20, TimeUnit.SECONDS); diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/HistoryIntegrationTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/HistoryIntegrationTests.java index 5c9dafeaca001..65d19ccf87255 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/HistoryIntegrationTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/HistoryIntegrationTests.java @@ -185,8 +185,8 @@ public void testThatHistoryContainsStatus() throws Exception { String ackStatusState = source.getValue("status.actions._logger.ack.state").toString().toUpperCase(Locale.ROOT); assertThat(ackStatusState, is(actionStatus.ackStatus().state().toString())); - Boolean lastExecutionSuccesful = source.getValue("status.actions._logger.last_execution.successful"); - 
assertThat(lastExecutionSuccesful, is(actionStatus.lastExecution().successful())); + Boolean lastExecutionSuccessful = source.getValue("status.actions._logger.last_execution.successful"); + assertThat(lastExecutionSuccessful, is(actionStatus.lastExecution().successful())); // also ensure that the status field is disabled in the watch history GetMappingsResponse response = client().admin().indices().prepareGetMappings(".watcher-history*").addTypes("doc").get(); diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/HttpSecretsIntegrationTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/HttpSecretsIntegrationTests.java index 0de8fc1ee4bc0..09e0642dcc527 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/HttpSecretsIntegrationTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/HttpSecretsIntegrationTests.java @@ -179,7 +179,7 @@ public void testWebhookAction() throws Exception { .get(); // verifying the basic auth password is stored encrypted in the index when security - // is enabled, when it's not enabled, the the password should be stored in plain text GetResponse response = client().prepareGet(Watch.INDEX, Watch.DOC_TYPE, "_id").get(); assertThat(response, notNullValue()); assertThat(response.getId(), is("_id")); diff --git a/x-pack/qa/evil-tests/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosTicketValidatorTests.java b/x-pack/qa/evil-tests/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosTicketValidatorTests.java index c0886f953fee1..e26b13b4ba651 100644 --- a/x-pack/qa/evil-tests/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosTicketValidatorTests.java +++
b/x-pack/qa/evil-tests/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosTicketValidatorTests.java @@ -95,7 +95,7 @@ public void testWhenKeyTabWithInvalidContentFailsValidation() } } - public void testValidKebrerosTicket() throws PrivilegedActionException, GSSException, LoginException { + public void testValidKerberosTicket() throws PrivilegedActionException, GSSException, LoginException { // Client login and init token preparation final String clientUserName = randomFrom(clientUserNames); final SecureString password = new SecureString("pwd".toCharArray()); diff --git a/x-pack/qa/evil-tests/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/SimpleKdcLdapServer.java b/x-pack/qa/evil-tests/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/SimpleKdcLdapServer.java index 8888ce33be57f..f309338234ad4 100644 --- a/x-pack/qa/evil-tests/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/SimpleKdcLdapServer.java +++ b/x-pack/qa/evil-tests/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/SimpleKdcLdapServer.java @@ -222,7 +222,7 @@ public Void run() throws Exception { return null; } }); - logger.info("SimpleKdcServer stoppped."); + logger.info("SimpleKdcServer stopped."); } private static int getServerPort(String transport) { diff --git a/x-pack/qa/kerberos-tests/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/SpnegoHttpClientConfigCallbackHandler.java b/x-pack/qa/kerberos-tests/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/SpnegoHttpClientConfigCallbackHandler.java index e5768d8f2e944..d449d0ad9913f 100644 --- a/x-pack/qa/kerberos-tests/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/SpnegoHttpClientConfigCallbackHandler.java +++ b/x-pack/qa/kerberos-tests/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/SpnegoHttpClientConfigCallbackHandler.java @@ -253,7 +253,7 @@ public void handle(final Callback[] callbacks) throws IOException, UnsupportedCa * 
customizable we are constructing it in memory. *

    * As we are using this instead of jaas.conf, this requires refresh of - * {@link Configuration} and reqires appropriate security permissions to do so. + * {@link Configuration} and requires appropriate security permissions to do so. */ private static class PasswordJaasConf extends AbstractJaasConf { diff --git a/x-pack/qa/security-example-spi-extension/build.gradle b/x-pack/qa/security-example-spi-extension/build.gradle index e3bbf6e613f4e..e3c533d69a10c 100644 --- a/x-pack/qa/security-example-spi-extension/build.gradle +++ b/x-pack/qa/security-example-spi-extension/build.gradle @@ -2,7 +2,7 @@ apply plugin: 'elasticsearch.esplugin' esplugin { name 'spi-extension' - description 'An example spi extension pluing for xpack security' + description 'An example spi extension plugin for xpack security' classname 'org.elasticsearch.example.SpiExtensionPlugin' extendedPlugins = ['x-pack-security'] } diff --git a/x-pack/qa/security-tools-tests/build.gradle b/x-pack/qa/security-tools-tests/build.gradle index 5df22c557db3c..98d3659e137f7 100644 --- a/x-pack/qa/security-tools-tests/build.gradle +++ b/x-pack/qa/security-tools-tests/build.gradle @@ -10,7 +10,7 @@ dependencies { // add test resources from security, so certificate tool tests can use example certs sourceSets.test.resources.srcDirs(project(xpackModule('security')).sourceSets.test.resources.srcDirs) -// we have to repeate these patterns because the security test resources are effectively in the src of this project +// we have to repeat these patterns because the security test resources are effectively in the src of this project forbiddenPatterns { exclude '**/*.key' exclude '**/*.p12' diff --git a/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java b/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java index 1b3b3f1bbeb1b..2341878d92489 100644 --- 
a/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java +++ b/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java @@ -149,7 +149,7 @@ public void testSearchInputHasPermissions() throws Exception { builder.startObject("condition").startObject("compare").startObject("ctx.payload.hits.total").field("gte", 1) .endObject().endObject().endObject(); builder.startObject("actions").startObject("logging").startObject("logging") - .field("text", "successfully ran " + watchId + "to test for search inpput").endObject().endObject().endObject(); + .field("text", "successfully ran " + watchId + "to test for search input").endObject().endObject().endObject(); builder.endObject(); indexWatch(watchId, builder);