diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index ba3e3c1175b2c..0d8826ad8d61a 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -320,7 +320,7 @@ have to test Elasticsearch.
#### Configurations
Gradle organizes dependencies and build artifacts into "configurations" and
-allows you to use these configurations arbitrarilly. Here are some of the most
+allows you to use these configurations arbitrarily. Here are some of the most
common configurations in our build and how we use them:
diff --git a/TESTING.asciidoc b/TESTING.asciidoc
index d3321f7f8cc6e..55d41dee1eecd 100644
--- a/TESTING.asciidoc
+++ b/TESTING.asciidoc
@@ -250,7 +250,7 @@ Pass arbitrary jvm arguments.
Running backwards compatibility tests is disabled by default since it
requires a release version of elasticsearch to be present on the test system.
-To run backwards compatibilty tests untar or unzip a release and run the tests
+To run backwards compatibility tests untar or unzip a release and run the tests
with the following command:
---------------------------------------------------------------------------
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/VersionCollection.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/VersionCollection.groovy
index daab0efc8c69a..063dcf7d3bb7d 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/VersionCollection.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/VersionCollection.groovy
@@ -122,7 +122,7 @@ class VersionCollection {
if (isReleased(version) == false) {
// caveat 1 - This should only ever contain 2 non released branches in flight. An example is 6.x is frozen,
// and 6.2 is cut but not yet released there is some simple logic to make sure that in the case of more than 2,
- // it will bail. The order is that the minor snapshot is fufilled first, and then the staged minor snapshot
+ // it will bail. The order is that the minor snapshot is fulfilled first, and then the staged minor snapshot
if (nextMinorSnapshot == null) {
// it has not been set yet
nextMinorSnapshot = replaceAsSnapshot(version)
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTask.groovy
index f2105086f2553..ac258f76e3322 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTask.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTask.groovy
@@ -72,7 +72,7 @@ public class RestTestsFromSnippetsTask extends SnippetsTask {
/**
* Root directory containing all the files generated by this task. It is
- * contained withing testRoot.
+ * contained within testRoot.
*/
File outputRoot() {
return new File(testRoot, '/rest-api-spec/test')
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy
index aaf4e468182a9..b96bdfae200ac 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy
@@ -337,7 +337,7 @@ class NodeInfo {
case 'deb':
return new File(baseDir, "${distro}-extracted/etc/elasticsearch")
default:
- throw new InvalidUserDataException("Unkown distribution: ${distro}")
+ throw new InvalidUserDataException("Unknown distribution: ${distro}")
}
}
}
diff --git a/buildSrc/src/testKit/elasticsearch-build-resources/build.gradle b/buildSrc/src/testKit/elasticsearch-build-resources/build.gradle
index 95d1453025e92..c87c097e6beb6 100644
--- a/buildSrc/src/testKit/elasticsearch-build-resources/build.gradle
+++ b/buildSrc/src/testKit/elasticsearch-build-resources/build.gradle
@@ -22,7 +22,7 @@ task sample {
// dependsOn buildResources.outputDir
// for now it's just
dependsOn buildResources
- // we have to refference it at configuration time in order to be picked up
+ // we have to reference it at configuration time in order to be picked up
ext.checkstyle_suppressions = buildResources.copy('checkstyle_suppressions.xml')
doLast {
println "This task is using ${file(checkstyle_suppressions)}"
@@ -35,4 +35,4 @@ task noConfigAfterExecution {
println "This should cause an error because we are refferencing " +
"${buildResources.copy('checkstyle_suppressions.xml')} after the `buildResources` task has ran."
}
-}
\ No newline at end of file
+}
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/Detector.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/Detector.java
index e1af60269b52b..44fc18032d29b 100644
--- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/Detector.java
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/Detector.java
@@ -215,7 +215,7 @@ public boolean isUseNull() {
}
/**
- * Excludes frequently-occuring metrics from the analysis;
+ * Excludes frequently-occurring metrics from the analysis;
* can apply to 'by' field, 'over' field, or both
*
* @return the value that the user set
diff --git a/distribution/tools/launchers/src/main/java/org/elasticsearch/tools/launchers/JvmOptionsParser.java b/distribution/tools/launchers/src/main/java/org/elasticsearch/tools/launchers/JvmOptionsParser.java
index c19c12cfe4444..d74f106c50ba4 100644
--- a/distribution/tools/launchers/src/main/java/org/elasticsearch/tools/launchers/JvmOptionsParser.java
+++ b/distribution/tools/launchers/src/main/java/org/elasticsearch/tools/launchers/JvmOptionsParser.java
@@ -228,7 +228,7 @@ static void parse(
// no range is present, apply the JVM option to the specified major version only
upper = lower;
} else if (end == null) {
- // a range of the form \\d+- is present, apply the JVM option to all major versions larger than the specifed one
+ // a range of the form \\d+- is present, apply the JVM option to all major versions larger than the specified one
upper = Integer.MAX_VALUE;
} else {
// a range of the form \\d+-\\d+ is present, apply the JVM option to the specified range of major versions
diff --git a/docs/java-rest/low-level/usage.asciidoc b/docs/java-rest/low-level/usage.asciidoc
index 71fadd98988a3..38104215720d7 100644
--- a/docs/java-rest/low-level/usage.asciidoc
+++ b/docs/java-rest/low-level/usage.asciidoc
@@ -307,7 +307,7 @@ You can also customize the response consumer used to buffer the asynchronous
responses. The default consumer will buffer up to 100MB of response on the
JVM heap. If the response is larger then the request will fail. You could,
for example, lower the maximum size which might be useful if you are running
-in a heap constrained environment like the exmaple above.
+in a heap constrained environment like the example above.
Once you've created the singleton you can use it when making requests:
diff --git a/docs/painless/painless-api-reference.asciidoc b/docs/painless/painless-api-reference.asciidoc
index 54b1f20977b61..814824b0db9a2 100644
--- a/docs/painless/painless-api-reference.asciidoc
+++ b/docs/painless/painless-api-reference.asciidoc
@@ -3,7 +3,7 @@
Painless has a strict whitelist for methods and classes to ensure all
painless scripts are secure. Most of these methods are exposed directly
-from the Java Runtime Enviroment (JRE) while others are part of
+from the Java Runtime Environment (JRE) while others are part of
Elasticsearch or Painless itself. Below is a list of all available
classes grouped with their respected methods. Clicking on the method
name takes you to the documentation for that specific method. Methods
diff --git a/docs/plugins/repository-hdfs.asciidoc b/docs/plugins/repository-hdfs.asciidoc
index ffd5ecebc2536..1b975ef761d4a 100644
--- a/docs/plugins/repository-hdfs.asciidoc
+++ b/docs/plugins/repository-hdfs.asciidoc
@@ -32,7 +32,7 @@ PUT _snapshot/my_hdfs_repository
"type": "hdfs",
"settings": {
"uri": "hdfs://namenode:8020/",
- "path": "elasticsearch/respositories/my_hdfs_repository",
+ "path": "elasticsearch/repositories/my_hdfs_repository",
"conf.dfs.client.read.shortcircuit": "true"
}
}
@@ -149,7 +149,7 @@ PUT _snapshot/my_hdfs_repository
"type": "hdfs",
"settings": {
"uri": "hdfs://namenode:8020/",
- "path": "/user/elasticsearch/respositories/my_hdfs_repository",
+ "path": "/user/elasticsearch/repositories/my_hdfs_repository",
"security.principal": "elasticsearch@REALM"
}
}
@@ -167,7 +167,7 @@ PUT _snapshot/my_hdfs_repository
"type": "hdfs",
"settings": {
"uri": "hdfs://namenode:8020/",
- "path": "/user/elasticsearch/respositories/my_hdfs_repository",
+ "path": "/user/elasticsearch/repositories/my_hdfs_repository",
"security.principal": "elasticsearch/_HOST@REALM"
}
}
@@ -186,4 +186,4 @@ extracts for file access checks will be `elasticsearch`.
NOTE: The repository plugin makes no assumptions of what Elasticsearch's principal name is. The main fragment of the
Kerberos principal is not required to be `elasticsearch`. If you have a principal or service name that works better
-for you or your organization then feel free to use it instead!
\ No newline at end of file
+for you or your organization then feel free to use it instead!
diff --git a/docs/reference/commands/certutil.asciidoc b/docs/reference/commands/certutil.asciidoc
index e0c6c701e31fe..4b04f95445ef6 100644
--- a/docs/reference/commands/certutil.asciidoc
+++ b/docs/reference/commands/certutil.asciidoc
@@ -72,7 +72,7 @@ parameter or in the `filename` field in an input YAML file.
You can optionally provide IP addresses or DNS names for each instance. If
neither IP addresses nor DNS names are specified, the Elastic stack products
cannot perform hostname verification and you might need to configure the
-`verfication_mode` security setting to `certificate` only. For more information
+`verification_mode` security setting to `certificate` only. For more information
about this setting, see <>.
All certificates that are generated by this command are signed by a CA. You can
diff --git a/docs/reference/ml/apis/jobcounts.asciidoc b/docs/reference/ml/apis/jobcounts.asciidoc
index d0169e228d549..e6af7ac569cd7 100644
--- a/docs/reference/ml/apis/jobcounts.asciidoc
+++ b/docs/reference/ml/apis/jobcounts.asciidoc
@@ -207,7 +207,7 @@ The `forecasts_stats` object shows statistics about forecasts. It has the follow
(object) Counts per forecast status, for example: {"finished" : 2}.
NOTE: `memory_bytes`, `records`, `processing_time_ms` and `status` require at least 1 forecast, otherwise
-these fields are ommitted.
+these fields are omitted.
[float]
[[ml-stats-node]]
diff --git a/docs/reference/rollup/apis/rollup-caps.asciidoc b/docs/reference/rollup/apis/rollup-caps.asciidoc
index 907efb94c1776..274037cae8f2f 100644
--- a/docs/reference/rollup/apis/rollup-caps.asciidoc
+++ b/docs/reference/rollup/apis/rollup-caps.asciidoc
@@ -45,7 +45,7 @@ For more information, see
==== Examples
Imagine we have an index named `sensor-1` full of raw data. We know that the data will grow over time, so there
-will be a `sensor-2`, `sensor-3`, etc. Let's create a Rollup job that targets the index pattern `sensor-*` to accomodate
+will be a `sensor-2`, `sensor-3`, etc. Let's create a Rollup job that targets the index pattern `sensor-*` to accommodate
this future scaling:
[source,js]
diff --git a/docs/reference/sql/concepts.asciidoc b/docs/reference/sql/concepts.asciidoc
index dab33618762cd..aceea1949a28a 100644
--- a/docs/reference/sql/concepts.asciidoc
+++ b/docs/reference/sql/concepts.asciidoc
@@ -62,4 +62,4 @@ Multiple clusters, each with its own namespace, connected to each other in a fed
|===
-As one can see while the mapping between the concepts are not exactly one to one and the semantics somewhat different, there are more things in common than differences. In fact, thanks to SQL declarative nature, many concepts can move across {es} transparently and the terminology of the two likely to be used interchangeably through-out the rest of the material.
\ No newline at end of file
+As one can see while the mapping between the concepts are not exactly one to one and the semantics somewhat different, there are more things in common than differences. In fact, thanks to SQL declarative nature, many concepts can move across {es} transparently and the terminology of the two likely to be used interchangeably through-out the rest of the material.
diff --git a/docs/reference/sql/endpoints/jdbc.asciidoc b/docs/reference/sql/endpoints/jdbc.asciidoc
index a8a866ac93cb1..98589043f614d 100644
--- a/docs/reference/sql/endpoints/jdbc.asciidoc
+++ b/docs/reference/sql/endpoints/jdbc.asciidoc
@@ -44,7 +44,7 @@ from `artifacts.elastic.co/maven` by adding it to the repositories list:
=== Setup
The driver main class is `org.elasticsearch.xpack.sql.jdbc.jdbc.JdbcDriver`.
-Note the driver implements the JDBC 4.0 +Service Provider+ mechanism meaning it is registerd automatically
+Note the driver implements the JDBC 4.0 +Service Provider+ mechanism meaning it is registered automatically
as long as its available in the classpath.
Once registered, the driver understands the following syntax as an URL:
@@ -182,4 +182,4 @@ connection. For example:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{jdbc-tests}/SimpleExampleTestCase.java[simple_example]
---------------------------------------------------
\ No newline at end of file
+--------------------------------------------------
diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/package-info.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/package-info.java
index b8a1af073bf53..8ba8b79b74a92 100644
--- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/package-info.java
+++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/package-info.java
@@ -143,7 +143,7 @@
* described by later documentation.
*
* Storebable nodes have three methods for writing -- setup, load, and store. These methods
- * are used in conjuction with a parent node aware of the storeable node (lhs) that has a node
+ * are used in conjunction with a parent node aware of the storeable node (lhs) that has a node
* representing a value to store (rhs). The setup method is always once called before a store
* to give storeable nodes a chance to write any prefixes they may have and any values such as
* array indices before the store happens. Load is called on a storeable node that must also
@@ -152,7 +152,7 @@
* Sub nodes are partial nodes that require a parent to work correctly. These nodes can really
* represent anything the parent node would like to split up into logical pieces and don't really
* have any distinct set of rules. The currently existing subnodes all have ANode as a super class
- * somewhere in their class heirachy so the parent node can defer some analysis and writing to
+ * somewhere in their class hierarchy so the parent node can defer some analysis and writing to
* the sub node.
*/
package org.elasticsearch.painless.node;
diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/PainlessDocGenerator.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/PainlessDocGenerator.java
index 1460d5f2359b6..444d1cd4f9400 100644
--- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/PainlessDocGenerator.java
+++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/PainlessDocGenerator.java
@@ -434,7 +434,7 @@ private static String javadocRoot(Class> clazz) {
if (classPackage.startsWith("org.apache.lucene")) {
return "lucene-core";
}
- throw new IllegalArgumentException("Unrecognized packge: " + classPackage);
+ throw new IllegalArgumentException("Unrecognized package: " + classPackage);
}
private static void emitGeneratedWarning(PrintStream stream) {
diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/PainlessExecuteRequestTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/PainlessExecuteRequestTests.java
index 44cd6b5304dc4..e70d728091fab 100644
--- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/PainlessExecuteRequestTests.java
+++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/PainlessExecuteRequestTests.java
@@ -83,7 +83,7 @@ private static ContextSetup randomContextSetup() {
QueryBuilder query = randomBoolean() ? new MatchAllQueryBuilder() : null;
// TODO: pass down XContextType to createTestInstance() method.
// otherwise the document itself is different causing test failures.
- // This should be done in a seperate change as the test instance is created before xcontent type is randomly picked and
+ // This should be done in a separate change as the test instance is created before xcontent type is randomly picked and
// all the createTestInstance() methods need to be changed, which will make this a big chnage
// BytesReference doc = randomBoolean() ? new BytesArray("{}") : null;
BytesReference doc = null;
diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalResponse.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalResponse.java
index 6efff154b6253..991d97b61968d 100644
--- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalResponse.java
+++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalResponse.java
@@ -42,8 +42,8 @@
/**
* Returns the results for a {@link RankEvalRequest}.
- * The repsonse contains a detailed section for each evaluation query in the request and
- * possible failures that happened when executin individual queries.
+ * The response contains a detailed section for each evaluation query in the request and
+ * possible failures that happened when executing individual queries.
**/
public class RankEvalResponse extends ActionResponse implements ToXContentObject {
diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java
index 5c12e85bb4ce9..7efd1ee5d6e72 100644
--- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java
+++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java
@@ -481,7 +481,7 @@ public ScheduledFuture> schedule(TimeValue delay, String name, Runnable comman
/**
* Execute a bulk retry test case. The total number of failures is random and the number of retries attempted is set to
- * testRequest.getMaxRetries and controled by the failWithRejection parameter.
+ * testRequest.getMaxRetries and controlled by the failWithRejection parameter.
*/
private void bulkRetryTestCase(boolean failWithRejection) throws Exception {
int totalFailures = randomIntBetween(1, testRequest.getMaxRetries());
diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/CancelTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/CancelTests.java
index d2110c5cded14..6b7b21a55148d 100644
--- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/CancelTests.java
+++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/CancelTests.java
@@ -122,7 +122,7 @@ private void testCancel(String action, AbstractBulkByScrollRequestBuilder, ?>
logger.debug("waiting for updates to be blocked");
boolean blocked = awaitBusy(
() -> ALLOWED_OPERATIONS.hasQueuedThreads() && ALLOWED_OPERATIONS.availablePermits() == 0,
- 1, TimeUnit.MINUTES); // 10 seconds is usually fine but on heavilly loaded machines this can wake a while
+ 1, TimeUnit.MINUTES); // 10 seconds is usually fine but on heavily loaded machines this can take a while
assertTrue("updates blocked", blocked);
// Status should show the task running
diff --git a/plugins/analysis-icu/src/test/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapperIT.java b/plugins/analysis-icu/src/test/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapperIT.java
index ce8a635ffb6f1..3bcefe0cf5680 100644
--- a/plugins/analysis-icu/src/test/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapperIT.java
+++ b/plugins/analysis-icu/src/test/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapperIT.java
@@ -60,7 +60,7 @@ public void testBasicUsage() throws Exception {
String index = "foo";
String type = "mytype";
- String[] equilavent = {"I WİLL USE TURKİSH CASING", "ı will use turkish casıng"};
+ String[] equivalent = {"I WİLL USE TURKİSH CASING", "ı will use turkish casıng"};
XContentBuilder builder = jsonBuilder()
.startObject().startObject("properties")
@@ -75,8 +75,8 @@ public void testBasicUsage() throws Exception {
// both values should collate to same value
indexRandom(true,
- client().prepareIndex(index, type, "1").setSource("{\"collate\":\"" + equilavent[0] + "\"}", XContentType.JSON),
- client().prepareIndex(index, type, "2").setSource("{\"collate\":\"" + equilavent[1] + "\"}", XContentType.JSON)
+ client().prepareIndex(index, type, "1").setSource("{\"collate\":\"" + equivalent[0] + "\"}", XContentType.JSON),
+ client().prepareIndex(index, type, "2").setSource("{\"collate\":\"" + equivalent[1] + "\"}", XContentType.JSON)
);
// searching for either of the terms should return both results since they collate to the same value
@@ -85,7 +85,7 @@ public void testBasicUsage() throws Exception {
.types(type)
.source(new SearchSourceBuilder()
.fetchSource(false)
- .query(QueryBuilders.termQuery("collate", randomBoolean() ? equilavent[0] : equilavent[1]))
+ .query(QueryBuilders.termQuery("collate", randomBoolean() ? equivalent[0] : equivalent[1]))
.sort("collate")
.sort("_id", SortOrder.DESC) // secondary sort should kick in because both will collate to same value
);
@@ -100,7 +100,7 @@ public void testMultipleValues() throws Exception {
String index = "foo";
String type = "mytype";
- String[] equilavent = {"a", "C", "a", "B"};
+ String[] equivalent = {"a", "C", "a", "B"};
XContentBuilder builder = jsonBuilder()
.startObject().startObject("properties")
@@ -114,9 +114,9 @@ public void testMultipleValues() throws Exception {
// everything should be indexed fine, no exceptions
indexRandom(true,
- client().prepareIndex(index, type, "1").setSource("{\"collate\":[\"" + equilavent[0] + "\", \""
- + equilavent[1] + "\"]}", XContentType.JSON),
- client().prepareIndex(index, type, "2").setSource("{\"collate\":\"" + equilavent[2] + "\"}", XContentType.JSON)
+ client().prepareIndex(index, type, "1").setSource("{\"collate\":[\"" + equivalent[0] + "\", \""
+ + equivalent[1] + "\"]}", XContentType.JSON),
+ client().prepareIndex(index, type, "2").setSource("{\"collate\":\"" + equivalent[2] + "\"}", XContentType.JSON)
);
// using sort mode = max, values B and C will be used for the sort
@@ -161,7 +161,7 @@ public void testNormalization() throws Exception {
String index = "foo";
String type = "mytype";
- String[] equilavent = {"I W\u0049\u0307LL USE TURKİSH CASING", "ı will use turkish casıng"};
+ String[] equivalent = {"I W\u0049\u0307LL USE TURKİSH CASING", "ı will use turkish casıng"};
XContentBuilder builder = jsonBuilder()
.startObject().startObject("properties")
@@ -176,8 +176,8 @@ public void testNormalization() throws Exception {
assertAcked(client().admin().indices().prepareCreate(index).addMapping(type, builder));
indexRandom(true,
- client().prepareIndex(index, type, "1").setSource("{\"collate\":\"" + equilavent[0] + "\"}", XContentType.JSON),
- client().prepareIndex(index, type, "2").setSource("{\"collate\":\"" + equilavent[1] + "\"}", XContentType.JSON)
+ client().prepareIndex(index, type, "1").setSource("{\"collate\":\"" + equivalent[0] + "\"}", XContentType.JSON),
+ client().prepareIndex(index, type, "2").setSource("{\"collate\":\"" + equivalent[1] + "\"}", XContentType.JSON)
);
// searching for either of the terms should return both results since they collate to the same value
@@ -186,7 +186,7 @@ public void testNormalization() throws Exception {
.types(type)
.source(new SearchSourceBuilder()
.fetchSource(false)
- .query(QueryBuilders.termQuery("collate", randomBoolean() ? equilavent[0] : equilavent[1]))
+ .query(QueryBuilders.termQuery("collate", randomBoolean() ? equivalent[0] : equivalent[1]))
.sort("collate")
.sort("_id", SortOrder.DESC) // secondary sort should kick in because both will collate to same value
);
@@ -204,7 +204,7 @@ public void testSecondaryStrength() throws Exception {
String index = "foo";
String type = "mytype";
- String[] equilavent = {"TESTING", "testing"};
+ String[] equivalent = {"TESTING", "testing"};
XContentBuilder builder = jsonBuilder()
.startObject().startObject("properties")
@@ -219,8 +219,8 @@ public void testSecondaryStrength() throws Exception {
assertAcked(client().admin().indices().prepareCreate(index).addMapping(type, builder));
indexRandom(true,
- client().prepareIndex(index, type, "1").setSource("{\"collate\":\"" + equilavent[0] + "\"}", XContentType.JSON),
- client().prepareIndex(index, type, "2").setSource("{\"collate\":\"" + equilavent[1] + "\"}", XContentType.JSON)
+ client().prepareIndex(index, type, "1").setSource("{\"collate\":\"" + equivalent[0] + "\"}", XContentType.JSON),
+ client().prepareIndex(index, type, "2").setSource("{\"collate\":\"" + equivalent[1] + "\"}", XContentType.JSON)
);
SearchRequest request = new SearchRequest()
@@ -228,7 +228,7 @@ public void testSecondaryStrength() throws Exception {
.types(type)
.source(new SearchSourceBuilder()
.fetchSource(false)
- .query(QueryBuilders.termQuery("collate", randomBoolean() ? equilavent[0] : equilavent[1]))
+ .query(QueryBuilders.termQuery("collate", randomBoolean() ? equivalent[0] : equivalent[1]))
.sort("collate")
.sort("_id", SortOrder.DESC) // secondary sort should kick in because both will collate to same value
);
@@ -247,7 +247,7 @@ public void testIgnorePunctuation() throws Exception {
String index = "foo";
String type = "mytype";
- String[] equilavent = {"foo-bar", "foo bar"};
+ String[] equivalent = {"foo-bar", "foo bar"};
XContentBuilder builder = jsonBuilder()
.startObject().startObject("properties")
@@ -262,8 +262,8 @@ public void testIgnorePunctuation() throws Exception {
assertAcked(client().admin().indices().prepareCreate(index).addMapping(type, builder));
indexRandom(true,
- client().prepareIndex(index, type, "1").setSource("{\"collate\":\"" + equilavent[0] + "\"}", XContentType.JSON),
- client().prepareIndex(index, type, "2").setSource("{\"collate\":\"" + equilavent[1] + "\"}", XContentType.JSON)
+ client().prepareIndex(index, type, "1").setSource("{\"collate\":\"" + equivalent[0] + "\"}", XContentType.JSON),
+ client().prepareIndex(index, type, "2").setSource("{\"collate\":\"" + equivalent[1] + "\"}", XContentType.JSON)
);
SearchRequest request = new SearchRequest()
@@ -271,7 +271,7 @@ public void testIgnorePunctuation() throws Exception {
.types(type)
.source(new SearchSourceBuilder()
.fetchSource(false)
- .query(QueryBuilders.termQuery("collate", randomBoolean() ? equilavent[0] : equilavent[1]))
+ .query(QueryBuilders.termQuery("collate", randomBoolean() ? equivalent[0] : equivalent[1]))
.sort("collate")
.sort("_id", SortOrder.DESC) // secondary sort should kick in because both will collate to same value
);
@@ -467,7 +467,7 @@ public void testCustomRules() throws Exception {
RuleBasedCollator tailoredCollator = new RuleBasedCollator(baseCollator.getRules() + DIN5007_2_tailorings);
String tailoredRules = tailoredCollator.getRules();
- String[] equilavent = {"Töne", "Toene"};
+ String[] equivalent = {"Töne", "Toene"};
XContentBuilder builder = jsonBuilder()
.startObject().startObject("properties")
@@ -481,8 +481,8 @@ public void testCustomRules() throws Exception {
assertAcked(client().admin().indices().prepareCreate(index).addMapping(type, builder));
indexRandom(true,
- client().prepareIndex(index, type, "1").setSource("{\"collate\":\"" + equilavent[0] + "\"}", XContentType.JSON),
- client().prepareIndex(index, type, "2").setSource("{\"collate\":\"" + equilavent[1] + "\"}", XContentType.JSON)
+ client().prepareIndex(index, type, "1").setSource("{\"collate\":\"" + equivalent[0] + "\"}", XContentType.JSON),
+ client().prepareIndex(index, type, "2").setSource("{\"collate\":\"" + equivalent[1] + "\"}", XContentType.JSON)
);
SearchRequest request = new SearchRequest()
@@ -490,7 +490,7 @@ public void testCustomRules() throws Exception {
.types(type)
.source(new SearchSourceBuilder()
.fetchSource(false)
- .query(QueryBuilders.termQuery("collate", randomBoolean() ? equilavent[0] : equilavent[1]))
+ .query(QueryBuilders.termQuery("collate", randomBoolean() ? equivalent[0] : equivalent[1]))
.sort("collate", SortOrder.ASC)
.sort("_id", SortOrder.DESC) // secondary sort should kick in because both will collate to same value
);
diff --git a/plugins/examples/custom-suggester/src/test/resources/rest-api-spec/test/custom-suggester/20_suggest.yml b/plugins/examples/custom-suggester/src/test/resources/rest-api-spec/test/custom-suggester/20_suggest.yml
index 3731a8b211268..bac4e1014ef15 100644
--- a/plugins/examples/custom-suggester/src/test/resources/rest-api-spec/test/custom-suggester/20_suggest.yml
+++ b/plugins/examples/custom-suggester/src/test/resources/rest-api-spec/test/custom-suggester/20_suggest.yml
@@ -1,7 +1,7 @@
# tests that the custom suggester works
# the issue that prompted serializing Suggestion as a registered named writeable was not revealed until
-# a user found that it would fail when reducing suggestions in a multi node envrionment
+# a user found that it would fail when reducing suggestions in a multi node environment
# https://github.com/elastic/elasticsearch/issues/26585
"test custom suggester":
- do:
diff --git a/plugins/ingest-user-agent/src/main/resources/regexes.yml b/plugins/ingest-user-agent/src/main/resources/regexes.yml
index 6c3369dc2f769..d9c16403cb8d2 100644
--- a/plugins/ingest-user-agent/src/main/resources/regexes.yml
+++ b/plugins/ingest-user-agent/src/main/resources/regexes.yml
@@ -748,7 +748,7 @@ os_parsers:
# possibility of false positive when different marketing names share same NT kernel
# e.g. windows server 2003 and windows xp
# lots of ua strings have Windows NT 4.1 !?!?!?!? !?!? !? !????!?! !!! ??? !?!?! ?
- # (very) roughly ordered in terms of frequency of occurence of regex (win xp currently most frequent, etc)
+ # (very) roughly ordered in terms of frequency of occurrence of regex (win xp currently most frequent, etc)
##########
# ie mobile desktop mode
@@ -2848,7 +2848,7 @@ device_parsers:
device_replacement: 'Micromax $1'
brand_replacement: 'Micromax'
model_replacement: '$1'
- # be carefull here with Acer e.g. A500
+ # be careful here with Acer e.g. A500
- regex: '; *(A\d{2}|A[12]\d{2}|A90S|A110Q) Build'
regex_flag: 'i'
device_replacement: 'Micromax $1'
diff --git a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/10_basic.yml b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/10_basic.yml
index 04d85eb607835..9b899fe800ce2 100644
--- a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/10_basic.yml
+++ b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/10_basic.yml
@@ -1,5 +1,5 @@
---
-"Create things in the cluster state that we'll validate are there after the ugprade":
+"Create things in the cluster state that we'll validate are there after the upgrade":
- do:
snapshot.create_repository:
repository: my_repo
diff --git a/qa/vagrant/src/test/resources/packaging/tests/60_systemd.bats b/qa/vagrant/src/test/resources/packaging/tests/60_systemd.bats
index a7628d08bbaff..db062eb337e74 100644
--- a/qa/vagrant/src/test/resources/packaging/tests/60_systemd.bats
+++ b/qa/vagrant/src/test/resources/packaging/tests/60_systemd.bats
@@ -190,7 +190,7 @@ setup() {
@test "[SYSTEMD] start Elasticsearch with custom JVM options" {
assert_file_exist $ESENVFILE
# The custom config directory is not under /tmp or /var/tmp because
- # systemd's private temp directory functionaly means different
+ # systemd's private temp directory functionally means different
# processes can have different views of what's in these directories
local temp=`mktemp -p /etc -d`
cp "$ESCONFIG"/elasticsearch.yml "$temp"
diff --git a/qa/vagrant/src/test/resources/packaging/tests/module_and_plugin_test_cases.bash b/qa/vagrant/src/test/resources/packaging/tests/module_and_plugin_test_cases.bash
index 9a1ff6f2e2349..622f570769caa 100644
--- a/qa/vagrant/src/test/resources/packaging/tests/module_and_plugin_test_cases.bash
+++ b/qa/vagrant/src/test/resources/packaging/tests/module_and_plugin_test_cases.bash
@@ -97,7 +97,7 @@ fi
rm -rf "$ESPLUGINS"
# The custom plugins directory is not under /tmp or /var/tmp because
- # systemd's private temp directory functionaly means different
+ # systemd's private temp directory functionally means different
# processes can have different views of what's in these directories
local es_plugins=$(mktemp -p /var -d -t 'plugins.XXXX')
chown -R elasticsearch:elasticsearch "$es_plugins"
diff --git a/qa/vagrant/src/test/resources/packaging/utils/utils.bash b/qa/vagrant/src/test/resources/packaging/utils/utils.bash
index cb71e9e6ec1a0..f5a9f25df16f3 100644
--- a/qa/vagrant/src/test/resources/packaging/utils/utils.bash
+++ b/qa/vagrant/src/test/resources/packaging/utils/utils.bash
@@ -556,7 +556,7 @@ run_elasticsearch_tests() {
move_config() {
local oldConfig="$ESCONFIG"
# The custom config directory is not under /tmp or /var/tmp because
- # systemd's private temp directory functionaly means different
+ # systemd's private temp directory functionally means different
# processes can have different views of what's in these directories
export ESCONFIG="${1:-$(mktemp -p /etc -d -t 'config.XXXX')}"
echo "Moving configuration directory from $oldConfig to $ESCONFIG"
diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java
index 9b4d232f23ca6..7d08c9f864e33 100644
--- a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java
+++ b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java
@@ -328,7 +328,7 @@ public InternalSearchResponse merge(boolean ignoreFrom, ReducedQueryPhase reduce
SearchPhaseResult searchResultProvider = resultsLookup.apply(shardDoc.shardIndex);
if (searchResultProvider == null) {
// this can happen if we are hitting a shard failure during the fetch phase
- // in this case we referenced the shard result via teh ScoreDoc but never got a
+ // in this case we referenced the shard result via the ScoreDoc but never got a
// result from fetch.
// TODO it would be nice to assert this in the future
continue;
@@ -380,7 +380,7 @@ private SearchHits getHits(ReducedQueryPhase reducedQueryPhase, boolean ignoreFr
SearchPhaseResult fetchResultProvider = resultsLookup.apply(shardDoc.shardIndex);
if (fetchResultProvider == null) {
// this can happen if we are hitting a shard failure during the fetch phase
- // in this case we referenced the shard result via teh ScoreDoc but never got a
+ // in this case we referenced the shard result via the ScoreDoc but never got a
// result from fetch.
// TODO it would be nice to assert this in the future
continue;
diff --git a/server/src/main/java/org/elasticsearch/bootstrap/Natives.java b/server/src/main/java/org/elasticsearch/bootstrap/Natives.java
index e428df66f1447..9bd2a5c2f3dc7 100644
--- a/server/src/main/java/org/elasticsearch/bootstrap/Natives.java
+++ b/server/src/main/java/org/elasticsearch/bootstrap/Natives.java
@@ -84,7 +84,7 @@ static void tryVirtualLock() {
*/
static String getShortPathName(final String path) {
if (!JNA_AVAILABLE) {
- logger.warn("cannot obtain short path for [{}] because JNA is not avilable", path);
+ logger.warn("cannot obtain short path for [{}] because JNA is not available", path);
return path;
}
return JNANatives.getShortPathName(path);
@@ -123,7 +123,7 @@ static void trySetMaxNumberOfThreads() {
static void trySetMaxSizeVirtualMemory() {
if (!JNA_AVAILABLE) {
- logger.warn("cannot getrlimit RLIMIT_AS beacuse JNA is not available");
+ logger.warn("cannot getrlimit RLIMIT_AS because JNA is not available");
return;
}
JNANatives.trySetMaxSizeVirtualMemory();
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java
index 09fde36e1f983..d0824ea7d8f35 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java
@@ -367,7 +367,7 @@ public String[] indexAliases(ClusterState state, String index, Predicate
- * 2014-11-18||-2y substracts two years from the input date
+ * 2014-11-18||-2y subtracts two years from the input date
* now/m rounds the current time to minute granularity
*
* Supported rounding units are
diff --git a/server/src/main/java/org/elasticsearch/index/analysis/PreConfiguredCharFilter.java b/server/src/main/java/org/elasticsearch/index/analysis/PreConfiguredCharFilter.java
index 84eb0c4c3498c..a3fddce3e060f 100644
--- a/server/src/main/java/org/elasticsearch/index/analysis/PreConfiguredCharFilter.java
+++ b/server/src/main/java/org/elasticsearch/index/analysis/PreConfiguredCharFilter.java
@@ -41,7 +41,7 @@ public static PreConfiguredCharFilter singleton(String name, boolean useFilterFo
}
/**
- * Create a pre-configured char filter that may not vary at all, provide access to the elasticsearch verison
+ * Create a pre-configured char filter that may not vary at all, provide access to the elasticsearch version
*/
public static PreConfiguredCharFilter singletonWithVersion(String name, boolean useFilterForMultitermQueries,
BiFunction create) {
diff --git a/server/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByScrollRequest.java b/server/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByScrollRequest.java
index b2e6f98f1268f..4aa9bc5ce146c 100644
--- a/server/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByScrollRequest.java
+++ b/server/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByScrollRequest.java
@@ -150,7 +150,7 @@ public ActionRequestValidationException validate() {
e = addValidationError("stored_fields is not supported in this context", e);
}
if (maxRetries < 0) {
- e = addValidationError("retries cannnot be negative", e);
+ e = addValidationError("retries cannot be negative", e);
}
if (false == (size == -1 || size > 0)) {
e = addValidationError(
diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java
index 11d8f44bef133..17756630517d2 100644
--- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java
+++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java
@@ -791,7 +791,7 @@ private Engine.DeleteResult applyDeleteOperation(long seqNo, long opPrimaryTerm,
+ "]";
ensureWriteAllowed(origin);
// When there is a single type, the unique identifier is only composed of the _id,
- // so there is no way to differenciate foo#1 from bar#1. This is especially an issue
+ // so there is no way to differentiate foo#1 from bar#1. This is especially an issue
// if a user first deletes foo#1 and then indexes bar#1: since we do not encode the
// _type in the uid it might look like we are reindexing the same document, which
// would fail if bar#1 is indexed with a lower version than foo#1 was deleted with.
diff --git a/server/src/main/java/org/elasticsearch/index/store/Store.java b/server/src/main/java/org/elasticsearch/index/store/Store.java
index 13499dfd60c23..18ccb988b8c83 100644
--- a/server/src/main/java/org/elasticsearch/index/store/Store.java
+++ b/server/src/main/java/org/elasticsearch/index/store/Store.java
@@ -1047,7 +1047,7 @@ public Map getCommitUserData() {
}
/**
- * returns the history uuid the store points at, or null if not existant.
+ * returns the history uuid the store points at, or null if nonexistent.
*/
public String getHistoryUUID() {
return commitUserData.get(Engine.HISTORY_UUID_KEY);
diff --git a/server/src/main/java/org/elasticsearch/index/translog/TranslogStats.java b/server/src/main/java/org/elasticsearch/index/translog/TranslogStats.java
index 8dd5ddcee3be3..1f87792745572 100644
--- a/server/src/main/java/org/elasticsearch/index/translog/TranslogStats.java
+++ b/server/src/main/java/org/elasticsearch/index/translog/TranslogStats.java
@@ -85,12 +85,12 @@ public int estimatedNumberOfOperations() {
return numberOfOperations;
}
- /** the size of the generations in the translog that weren't yet to comitted to lucene */
+ /** the size of the generations in the translog that weren't yet committed to lucene */
public long getUncommittedSizeInBytes() {
return uncommittedSizeInBytes;
}
- /** the number of operations in generations of the translog that weren't yet to comitted to lucene */
+ /** the number of operations in generations of the translog that weren't yet committed to lucene */
public int getUncommittedOperations() {
return uncommittedOperations;
}
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregatorFactory.java
index c8b1e630b8549..c7d500e81ca26 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregatorFactory.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregatorFactory.java
@@ -50,7 +50,7 @@ public FilterAggregatorFactory(String name, QueryBuilder filterBuilder, SearchCo
* Returns the {@link Weight} for this filter aggregation, creating it if
* necessary. This is done lazily so that the {@link Weight} is only created
* if the aggregation collects documents reducing the overhead of the
- * aggregation in teh case where no documents are collected.
+ * aggregation in the case where no documents are collected.
*
* Note that as aggregations are initialsed and executed in a serial manner,
* no concurrency considerations are necessary here.
diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/FragmentBuilderHelper.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/FragmentBuilderHelper.java
index 0b3204e193b07..583516c5cd4c2 100644
--- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/FragmentBuilderHelper.java
+++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/FragmentBuilderHelper.java
@@ -53,7 +53,7 @@ public static WeightedFragInfo fixWeightedFragInfo(MappedFieldType fieldType, Fi
if (!fragInfo.getSubInfos().isEmpty() && containsBrokenAnalysis(fieldType.indexAnalyzer())) {
/* This is a special case where broken analysis like WDF is used for term-vector creation at index-time
* which can potentially mess up the offsets. To prevent a SAIIOBException we need to resort
- * the fragments based on their offsets rather than using soley the positions as it is done in
+ * the fragments based on their offsets rather than using solely the positions as it is done in
* the FastVectorHighlighter. Yet, this is really a lucene problem and should be fixed in lucene rather
* than in this hack... aka. "we are are working on in!" */
final List subInfos = fragInfo.getSubInfos();
diff --git a/server/src/main/java/org/elasticsearch/transport/Transport.java b/server/src/main/java/org/elasticsearch/transport/Transport.java
index 90adf2ab9e7d4..fc1f0c9e5ec0f 100644
--- a/server/src/main/java/org/elasticsearch/transport/Transport.java
+++ b/server/src/main/java/org/elasticsearch/transport/Transport.java
@@ -150,7 +150,7 @@ default Object getCacheKey() {
}
/**
- * This class represents a response context that encapsulates the actual response handler, the action and the conneciton it was
+ * This class represents a response context that encapsulates the actual response handler, the action and the connection it was
* executed on.
*/
final class ResponseContext {
diff --git a/server/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java b/server/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java
index f1842b5b0dd1d..1b2c238098e50 100644
--- a/server/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java
+++ b/server/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java
@@ -149,7 +149,7 @@ public void testSimpleBulk7() throws Exception {
IllegalArgumentException exc = expectThrows(IllegalArgumentException.class,
() -> bulkRequest.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, null, XContentType.JSON));
assertThat(exc.getMessage(),
- containsString("Malformed action/metadata line [5], expected a simple value for field [_unkown] but found [START_ARRAY]"));
+ containsString("Malformed action/metadata line [5], expected a simple value for field [_unknown] but found [START_ARRAY]"));
}
public void testSimpleBulk8() throws Exception {
diff --git a/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java b/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java
index 6c084cb29cd9c..ff868c3250aef 100644
--- a/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java
+++ b/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java
@@ -1020,7 +1020,7 @@ protected ReplicaResult shardOperationOnReplica(Request request, IndexShard repl
// publish a new state (same as the old state with the version incremented)
setState(clusterService, stateWithNodes);
- // Assert that the request was retried, this time successfull
+ // Assert that the request was retried, this time successful
assertTrue("action should have been successfully called on retry but was not", calledSuccessfully.get());
transportService.stop();
}
diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/OperationRoutingTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/OperationRoutingTests.java
index de3223517b92f..7e74a35cf5584 100644
--- a/server/src/test/java/org/elasticsearch/cluster/routing/OperationRoutingTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/routing/OperationRoutingTests.java
@@ -599,7 +599,7 @@ public void testAdaptiveReplicaSelection() throws Exception {
collector.addNodeStatistics("node_1", 4, TimeValue.timeValueMillis(300).nanos(), TimeValue.timeValueMillis(250).nanos());
groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests);
shardChoice = groupIterator.get(0).nextOrNull();
- // finally, node 2 is choosen instead
+ // finally, node 2 is chosen instead
assertThat(shardChoice.currentNodeId(), equalTo("node_2"));
IOUtils.close(clusterService);
diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDeciderTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDeciderTests.java
index ba6fe5b9a5a3b..4d5639a05ea07 100644
--- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDeciderTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDeciderTests.java
@@ -63,7 +63,7 @@ public void testFilterInitialRecovery() {
"node2").build());
RoutingTable routingTable = state.routingTable();
- // we can initally only allocate on node2
+ // we can initially only allocate on node2
assertEquals(routingTable.index("idx").shard(0).shards().get(0).state(), INITIALIZING);
assertEquals(routingTable.index("idx").shard(0).shards().get(0).currentNodeId(), "node2");
routingTable = service.applyFailedShard(state, routingTable.index("idx").shard(0).shards().get(0), randomBoolean()).routingTable();
diff --git a/server/src/test/java/org/elasticsearch/index/mapper/MultiFieldCopyToMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/MultiFieldCopyToMapperTests.java
index 9e31bd76c3016..b17abcc17b359 100644
--- a/server/src/test/java/org/elasticsearch/index/mapper/MultiFieldCopyToMapperTests.java
+++ b/server/src/test/java/org/elasticsearch/index/mapper/MultiFieldCopyToMapperTests.java
@@ -37,7 +37,7 @@ public class MultiFieldCopyToMapperTests extends ESTestCase {
public void testExceptionForCopyToInMultiFields() throws IOException {
XContentBuilder mapping = createMappinmgWithCopyToInMultiField();
- // first check that for newer versions we throw exception if copy_to is found withing multi field
+ // first check that for newer versions we throw exception if copy_to is found within multi field
MapperService mapperService = MapperTestUtils.newMapperService(xContentRegistry(), createTempDir(), Settings.EMPTY, "test");
try {
mapperService.parse("type", new CompressedXContent(Strings.toString(mapping)), true);
diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java
index 56a14da845fff..5c610feba1922 100644
--- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java
+++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java
@@ -579,7 +579,7 @@ public void testCircuitBreakerIncrementedByIndexShard() throws Exception {
// Generate a couple of segments
client().prepareIndex("test", "_doc", "1").setSource("{\"foo\":\"" + randomAlphaOfLength(100) + "\"}", XContentType.JSON)
.setRefreshPolicy(IMMEDIATE).get();
- // Use routing so 2 documents are guarenteed to be on the same shard
+ // Use routing so 2 documents are guaranteed to be on the same shard
String routing = randomAlphaOfLength(5);
client().prepareIndex("test", "_doc", "2").setSource("{\"foo\":\"" + randomAlphaOfLength(100) + "\"}", XContentType.JSON)
.setRefreshPolicy(IMMEDIATE).setRouting(routing).get();
diff --git a/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java b/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java
index 9d18845a05e33..19c25fed0725d 100644
--- a/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java
+++ b/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java
@@ -2323,7 +2323,7 @@ ChannelFactory getChannelFactory() {
@Override
void deleteReaderFiles(TranslogReader reader) {
if (fail.fail()) {
- // simulate going OOM and dieing just at the wrong moment.
+ // simulate going OOM and dying just at the wrong moment.
throw new RuntimeException("simulated");
} else {
super.deleteReaderFiles(reader);
diff --git a/server/src/test/java/org/elasticsearch/monitor/process/ProcessProbeTests.java b/server/src/test/java/org/elasticsearch/monitor/process/ProcessProbeTests.java
index 4a44c518051a2..aceee1474c60e 100644
--- a/server/src/test/java/org/elasticsearch/monitor/process/ProcessProbeTests.java
+++ b/server/src/test/java/org/elasticsearch/monitor/process/ProcessProbeTests.java
@@ -69,7 +69,7 @@ public void testProcessStats() {
ProcessStats.Mem mem = stats.getMem();
assertNotNull(mem);
- // Commited total virtual memory can return -1 if not supported, let's see which platforms fail
+ // Committed total virtual memory can return -1 if not supported, let's see which platforms fail
assertThat(mem.getTotalVirtual().getBytes(), greaterThan(0L));
}
}
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalScriptedMetricTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalScriptedMetricTests.java
index d1918c170ed37..7d10f49ea8610 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalScriptedMetricTests.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalScriptedMetricTests.java
@@ -113,7 +113,7 @@ private static Object randomValue(Supplier