diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestResourcesPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestResourcesPlugin.java
index 4724c576066f1..c62089a42b159 100644
--- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestResourcesPlugin.java
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestResourcesPlugin.java
@@ -39,7 +39,7 @@
* }
* }
*
- * Will copy the entire core Rest API specifications (assuming the project has tests) and any of the the X-pack specs starting with enrich*.
+ * Will copy the entire core Rest API specifications (assuming the project has tests) and any of the X-pack specs starting with enrich*.
* It is recommended (but not required) to also explicitly declare which core specs your project depends on to help optimize the caching
* behavior.
* For example:
@@ -66,7 +66,7 @@
* }
* }
*
- * Will copy any of the the x-pack tests that start with graph, and will copy the X-pack graph specification, as well as the full core
+ * Will copy any of the x-pack tests that start with graph, and will copy the X-pack graph specification, as well as the full core
* Rest API specification.
*
 * Additionally you can specify which sourceSetName the resources should be copied to. The default is the yamlRestTest source set.
diff --git a/docs/reference/connector/docs/connectors-hosted-tutorial-mongo.asciidoc b/docs/reference/connector/docs/connectors-hosted-tutorial-mongo.asciidoc
index a1f7048705555..12050ff17e279 100644
--- a/docs/reference/connector/docs/connectors-hosted-tutorial-mongo.asciidoc
+++ b/docs/reference/connector/docs/connectors-hosted-tutorial-mongo.asciidoc
@@ -90,7 +90,7 @@ Find this by https://www.mongodb.com/docs/atlas/tutorial/connect-to-your-cluster
In this example, we'll use the `sample_mflix` database.
* *Collection*: The name of the collection you want to sync.
In this example, we'll use the `comments` collection of the `sample_mflix` database.
-* *Username*: The username you created earlier, in the the setup phase.
+* *Username*: The username you created earlier, in the setup phase.
* *Password*: The password you created earlier.
Keep these details handy!
diff --git a/docs/reference/esql/esql-query-api.asciidoc b/docs/reference/esql/esql-query-api.asciidoc
index b1582721ad0e0..63b8738266132 100644
--- a/docs/reference/esql/esql-query-api.asciidoc
+++ b/docs/reference/esql/esql-query-api.asciidoc
@@ -46,7 +46,7 @@ supports this parameter for CSV responses.
`drop_null_columns`::
(Optional, boolean) Should columns that are entirely `null` be removed from
the `columns` and `values` portion of the results? Defaults to `false`. If
-`true` the the response will include an extra section under the name
+`true` the response will include an extra section under the name
`all_columns` which has the names of all columns.
`format`::
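For readers who want to try the `drop_null_columns` behavior described above, here is a minimal sketch using the low-level Java REST client; the host, index name, and query are assumptions for illustration, not part of this change:
[source,java]
----
import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class EsqlDropNullColumnsExample {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            Request request = new Request("POST", "/_query");
            // With drop_null_columns=true, all-null columns are dropped from
            // "columns"/"values" and listed under the extra "all_columns" section.
            request.addParameter("drop_null_columns", "true");
            request.setJsonEntity("{\"query\": \"FROM my-index | LIMIT 10\"}"); // hypothetical index
            Response response = client.performRequest(request);
            System.out.println(response.getStatusLine());
        }
    }
}
----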
diff --git a/docs/reference/ml/anomaly-detection/apis/get-job.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-job.asciidoc
index 4ee6c429ce730..33692fd182fa7 100644
--- a/docs/reference/ml/anomaly-detection/apis/get-job.asciidoc
+++ b/docs/reference/ml/anomaly-detection/apis/get-job.asciidoc
@@ -209,7 +209,7 @@ value.
(string) Reserved for future use, currently set to `anomaly_detector`.
`job_version`::
-(string) The {ml} configuration version number at which the the job was created.
+(string) The {ml} configuration version number at which the job was created.
NOTE: From {es} 8.10.0, a new version number is used to
track the configuration and state changes in the {ml} plugin. This new
diff --git a/docs/reference/reranking/learning-to-rank-model-training.asciidoc b/docs/reference/reranking/learning-to-rank-model-training.asciidoc
index 8e0b3f9ae94ce..9a35573a0879d 100644
--- a/docs/reference/reranking/learning-to-rank-model-training.asciidoc
+++ b/docs/reference/reranking/learning-to-rank-model-training.asciidoc
@@ -43,7 +43,7 @@ feature_extractors=[
feature_name="title_bm25",
query={"match": {"title": "{{query}}"}}
),
- # We want to use the the number of matched terms in the title field as a feature:
+ # We want to use the number of matched terms in the title field as a feature:
QueryFeatureExtractor(
feature_name="title_matched_term_count",
query={
diff --git a/docs/reference/search-application/apis/put-search-application.asciidoc b/docs/reference/search-application/apis/put-search-application.asciidoc
index eb559acc8cdc7..dc5e20ec40b7f 100644
--- a/docs/reference/search-application/apis/put-search-application.asciidoc
+++ b/docs/reference/search-application/apis/put-search-application.asciidoc
@@ -192,7 +192,7 @@ When the above `dictionary` parameter is specified, the <>
-If the parameters are not valid, the the <> API will return an error.
+If the parameters are not valid, the <> API will return an error.
[source,console]
----
POST _application/search_application/my-app/_search
diff --git a/docs/reference/security/securing-communications/security-minimal-setup.asciidoc b/docs/reference/security/securing-communications/security-minimal-setup.asciidoc
index ee158294df03c..fd54c37d9e8fa 100644
--- a/docs/reference/security/securing-communications/security-minimal-setup.asciidoc
+++ b/docs/reference/security/securing-communications/security-minimal-setup.asciidoc
@@ -78,7 +78,7 @@ This command resets the password to an auto-generated value.
./bin/elasticsearch-reset-password -u elastic
----
+
-If you want to set the password to a specific value, run the command with the the
+If you want to set the password to a specific value, run the command with the
interactive (`-i`) parameter.
+
[source,shell]
@@ -93,7 +93,7 @@ interactive (`-i`) parameter.
./bin/elasticsearch-reset-password -u kibana_system
----
-. Save the new passwords. In the next step, you'll add the the password for the
+. Save the new passwords. In the next step, you'll add the password for the
`kibana_system` user to {kib}.
*Next*: <>
diff --git a/libs/x-content/src/main/java/org/elasticsearch/xcontent/support/AbstractXContentParser.java b/libs/x-content/src/main/java/org/elasticsearch/xcontent/support/AbstractXContentParser.java
index a3b495b9c3e38..909cf808d1f34 100644
--- a/libs/x-content/src/main/java/org/elasticsearch/xcontent/support/AbstractXContentParser.java
+++ b/libs/x-content/src/main/java/org/elasticsearch/xcontent/support/AbstractXContentParser.java
@@ -370,7 +370,7 @@ private static void skipToListStart(XContentParser parser) throws IOException {
}
}
- // read a list without bounds checks, assuming the the current parser is always on an array start
+ // read a list without bounds checks, assuming the current parser is always on an array start
private static List<Object> readListUnsafe(XContentParser parser, Supplier<Map<String, Object>> mapFactory) throws IOException {
assert parser.currentToken() == Token.START_ARRAY;
ArrayList<Object> list = new ArrayList<>();
diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java
index 36eb1d61e21d7..1ebd6f920d518 100644
--- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java
+++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java
@@ -335,7 +335,7 @@ public void refresh() {
* Customizes {@link com.amazonaws.auth.WebIdentityTokenCredentialsProvider}
*
*
- * Reads the the location of the web identity token not from AWS_WEB_IDENTITY_TOKEN_FILE, but from a symlink
+ * Reads the location of the web identity token not from AWS_WEB_IDENTITY_TOKEN_FILE, but from a symlink
* in the plugin directory, so we don't need to create a hardcoded read file permission for the plugin.
* Supports customization of the STS endpoint via a system property, so we can test it against a test fixture.
* Supports gracefully shutting down the provider and the STS client.
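As a rough, self-contained illustration of the symlink-based token lookup this comment describes (the path and the system property name below are hypothetical, chosen only for the sketch):
[source,java]
----
import java.nio.file.Files;
import java.nio.file.Path;

public class WebIdentityTokenLookupSketch {
    public static void main(String[] args) throws Exception {
        // Hypothetical symlink inside the plugin directory pointing at the real token file;
        // reading via the symlink keeps the required read permission scoped to the plugin dir.
        Path tokenSymlink = Path.of("plugins/repository-s3/web-identity-token");
        String token = Files.readString(tokenSymlink).trim();
        // Hypothetical property mirroring the "customize the STS endpoint for tests" idea.
        String stsEndpoint = System.getProperty("example.sts.endpoint", "https://sts.amazonaws.com");
        System.out.println("read " + token.length() + " token chars; STS endpoint: " + stsEndpoint);
    }
}
----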
diff --git a/qa/packaging/src/test/java/org/elasticsearch/packaging/util/FileUtils.java b/qa/packaging/src/test/java/org/elasticsearch/packaging/util/FileUtils.java
index 76140da810c2b..2b8fb0d4e6021 100644
--- a/qa/packaging/src/test/java/org/elasticsearch/packaging/util/FileUtils.java
+++ b/qa/packaging/src/test/java/org/elasticsearch/packaging/util/FileUtils.java
@@ -373,7 +373,7 @@ public static String escapePath(Path path) {
}
/**
- * Recursively copy the the source directory to the target directory, preserving permissions.
+ * Recursively copy the source directory to the target directory, preserving permissions.
*/
public static void copyDirectory(Path source, Path target) throws IOException {
Files.walkFileTree(source, new SimpleFileVisitor<>() {
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/SystemIndexMappingUpdateServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/SystemIndexMappingUpdateServiceIT.java
index fbc4e08b6b78a..de565605ff58a 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/indices/SystemIndexMappingUpdateServiceIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/SystemIndexMappingUpdateServiceIT.java
@@ -70,7 +70,7 @@ public void testSystemIndexManagerUpgradesMappings() throws Exception {
}
/**
- * Check that if the the SystemIndexManager finds a managed index with mappings that claim to be newer than
+ * Check that if the SystemIndexManager finds a managed index with mappings that claim to be newer than
* what it expects, then those mappings are left alone.
*/
public void testSystemIndexManagerLeavesNewerMappingsAlone() throws Exception {
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java
index b7777eca86179..6d2e9c37fc625 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java
@@ -2402,7 +2402,7 @@ public Metadata build(boolean skipNameCollisionChecks) {
assert previousIndicesLookup.equals(buildIndicesLookup(dataStreamMetadata(), indicesMap));
indicesLookup = previousIndicesLookup;
} else if (skipNameCollisionChecks == false) {
- // we have changes to the the entity names so we ensure we have no naming collisions
+ // we have changes to the entity names so we ensure we have no naming collisions
ensureNoNameCollisions(aliasedIndices.keySet(), indicesMap, dataStreamMetadata());
}
assert assertDataStreams(indicesMap, dataStreamMetadata());
diff --git a/server/src/main/java/org/elasticsearch/common/settings/Settings.java b/server/src/main/java/org/elasticsearch/common/settings/Settings.java
index 2abfee670b950..09773ba1de35e 100644
--- a/server/src/main/java/org/elasticsearch/common/settings/Settings.java
+++ b/server/src/main/java/org/elasticsearch/common/settings/Settings.java
@@ -242,7 +242,7 @@ public Settings getByPrefix(String prefix) {
if (prefix.isEmpty()) {
return this;
}
- // create the the next prefix right after the given prefix, and use it as exclusive upper bound for the sub-map to filter by prefix
+ // create the next prefix right after the given prefix, and use it as exclusive upper bound for the sub-map to filter by prefix
// below
char[] toPrefixCharArr = prefix.toCharArray();
toPrefixCharArr[toPrefixCharArr.length - 1]++;
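The exclusive-upper-bound trick in this comment can be seen with a plain `TreeMap`; this is a sketch of the idea, not the actual `Settings` internals:
[source,java]
----
import java.util.NavigableMap;
import java.util.TreeMap;

public class PrefixSubMapExample {
    public static void main(String[] args) {
        NavigableMap<String, String> settings = new TreeMap<>();
        settings.put("index.number_of_replicas", "0");
        settings.put("index.number_of_shards", "1");
        settings.put("indices.recovery.max_bytes_per_sec", "40mb");

        String prefix = "index.";
        // Increment the last character: "index." becomes "index/" ('.' + 1 == '/'),
        // the smallest string sorting after every key that starts with "index.".
        char[] upper = prefix.toCharArray();
        upper[upper.length - 1]++;
        String toPrefix = new String(upper);

        // The half-open range [prefix, toPrefix) holds exactly the keys with that
        // prefix, so "indices.recovery..." is excluded.
        System.out.println(settings.subMap(prefix, true, toPrefix, false));
        // {index.number_of_replicas=0, index.number_of_shards=1}
    }
}
----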
diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java
index fbfed8a75a146..30fea41330038 100644
--- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java
+++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java
@@ -954,7 +954,7 @@ private void notifyFailureOnceAllOutstandingRequestAreDone(Exception e) {
void createRetentionLease(final long startingSeqNo, ActionListener<RetentionLease> listener) {
updateRetentionLease(syncListener -> {
- // Clone the peer recovery retention lease belonging to the source shard. We are retaining history between the the local
+ // Clone the peer recovery retention lease belonging to the source shard. We are retaining history between the local
// checkpoint of the safe commit we're creating and this lease's retained seqno with the retention lock, and by cloning an
// existing lease we (approximately) know that all our peers are also retaining history as requested by the cloned lease. If
// the recovery now fails before copying enough history over then a subsequent attempt will find this lease, determine it is
diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java
index 435bf71e3b2c9..b43fe05a541f6 100644
--- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java
+++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java
@@ -435,7 +435,7 @@ public static String getRepositoryDataBlobName(long repositoryGeneration) {
/**
* Flag that is set to {@code true} if this instance is started with {@link #metadata} that has a higher value for
* {@link RepositoryMetadata#pendingGeneration()} than for {@link RepositoryMetadata#generation()} indicating a full cluster restart
- * potentially accounting for the the last {@code index-N} write in the cluster state.
+ * potentially accounting for the last {@code index-N} write in the cluster state.
* Note: While it is true that this value could also be set to {@code true} for an instance on a node that is just joining the cluster
* during a new {@code index-N} write, this does not present a problem. The node will still load the correct {@link RepositoryData} in
* all cases and simply do a redundant listing of the repository contents if it tries to load {@link RepositoryData} and falls back
diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/ChunkedBlobOutputStream.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/ChunkedBlobOutputStream.java
index bcbbad81f1ca1..0e2066f58e25d 100644
--- a/server/src/main/java/org/elasticsearch/repositories/blobstore/ChunkedBlobOutputStream.java
+++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/ChunkedBlobOutputStream.java
@@ -128,7 +128,7 @@ protected final void finishPart(T partId) {
}
/**
- * Write the contents of {@link #buffer} to storage. Implementations should call {@link #finishPart} at the end to track the the chunk
+ * Write the contents of {@link #buffer} to storage. Implementations should call {@link #finishPart} at the end to track the chunk
* of data just written and ready {@link #buffer} for the next write.
*/
protected abstract void flushBuffer() throws IOException;
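For intuition, a toy stand-in that follows the same contract (buffer writes, flush a chunk to "storage", track the finished part, and reset the buffer); it is a sketch, not the Elasticsearch class:
[source,java]
----
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.util.ArrayList;
import java.util.List;

// Toy analogue of a chunked blob output stream; not the Elasticsearch implementation.
class ToyChunkedOutputStream extends OutputStream {
    private final int chunkSize;
    private final ByteArrayOutputStream buffer = new ByteArrayOutputStream();
    private final List<byte[]> finishedParts = new ArrayList<>();

    ToyChunkedOutputStream(int chunkSize) {
        this.chunkSize = chunkSize;
    }

    @Override
    public void write(int b) throws IOException {
        buffer.write(b);
        if (buffer.size() >= chunkSize) {
            flushBuffer(); // mirrors the documented contract
        }
    }

    // Write the buffer out, track the finished chunk, and ready the buffer again.
    private void flushBuffer() {
        finishedParts.add(buffer.toByteArray()); // stand-in for the real upload
        buffer.reset();
    }
}
----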
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterByFilterAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterByFilterAggregator.java
index 8ebf55487ed59..cc8ecc74b5ea0 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterByFilterAggregator.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterByFilterAggregator.java
@@ -132,7 +132,7 @@ final void add(QueryToFilterAdapter filter) throws IOException {
}
/**
- * Build the the adapter or {@code null} if the this isn't a valid rewrite.
+ * Build the adapter or {@code null} if this isn't a valid rewrite.
*/
public final T build() throws IOException {
if (false == valid || aggCtx.enableRewriteToFilterByFilter() == false) {
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTermsAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTermsAggregatorFactory.java
index 1ff7529bf3188..080cac9cbfb85 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTermsAggregatorFactory.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTermsAggregatorFactory.java
@@ -123,7 +123,7 @@ private static SignificantTermsAggregatorSupplier bytesSupplier() {
/**
* Whether the aggregation will execute. If the main query matches no documents and parent aggregation isn't a global or terms
- * aggregation with min_doc_count = 0, the the aggregator will not really execute. In those cases it doesn't make sense to load
+ * aggregation with min_doc_count = 0, the aggregator will not really execute. In those cases it doesn't make sense to load
* global ordinals.
*
 * Some searches that will never match can still fall through and we end up running a query that will produce no results.
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/AggregationContext.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/AggregationContext.java
index 4f939ea294e48..c720f3d9465a3 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/support/AggregationContext.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/AggregationContext.java
@@ -556,7 +556,7 @@ public synchronized void removeReleasable(Aggregator aggregator) {
// Removing an aggregator is done after calling Aggregator#buildTopLevel which happens on an executor thread.
// We need to synchronize the removal because the AggregatorContext is shared between executor threads.
assert releaseMe.contains(aggregator)
- : "removing non-existing aggregator [" + aggregator.name() + "] from the the aggregation context";
+ : "removing non-existing aggregator [" + aggregator.name() + "] from the aggregation context";
releaseMe.remove(aggregator);
}
diff --git a/server/src/main/java/org/elasticsearch/snapshots/package-info.java b/server/src/main/java/org/elasticsearch/snapshots/package-info.java
index 694c9c5c9062b..d73a1d9bd701a 100644
--- a/server/src/main/java/org/elasticsearch/snapshots/package-info.java
+++ b/server/src/main/java/org/elasticsearch/snapshots/package-info.java
@@ -113,7 +113,7 @@
* snapshots, we load the {@link org.elasticsearch.snapshots.SnapshotInfo} for the source snapshot and check for shard snapshot
* failures of the relevant indices.
*
 * Once all shard counts are known and the health of all source indices data has been verified, we populate the
- * {@code SnapshotsInProgress.Entry#clones} map for the clone operation with the the relevant shard clone tasks.
+ * {@code SnapshotsInProgress.Entry#clones} map for the clone operation with the relevant shard clone tasks.
* After the clone tasks have been added to the {@code SnapshotsInProgress.Entry}, master executes them on its snapshot thread-pool
* by invoking {@link org.elasticsearch.repositories.Repository#cloneShardSnapshot} for each shard that is to be cloned. Each completed
* shard snapshot triggers a call to the {@link org.elasticsearch.snapshots.SnapshotsService#masterServiceTaskQueue} which updates the
diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfoTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfoTests.java
index c0bf8f7c3bf12..5fa138abca809 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfoTests.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfoTests.java
@@ -33,7 +33,7 @@
public class NodeInfoTests extends ESTestCase {
/**
- * Check that the the {@link NodeInfo#getInfo(Class)} method returns null
+ * Check that the {@link NodeInfo#getInfo(Class)} method returns null
* for absent info objects, and returns the right thing for present info
* objects.
*/
diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/JoinReasonServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/JoinReasonServiceTests.java
index 80aa142069358..dc46f862923e5 100644
--- a/server/src/test/java/org/elasticsearch/cluster/coordination/JoinReasonServiceTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/coordination/JoinReasonServiceTests.java
@@ -144,7 +144,7 @@ public void testCleanup() {
discoveryNodes[i] = randomDiscoveryNode();
}
- // we stop tracking the the oldest absent node(s) when only 1/3 of the tracked nodes are present
+ // we stop tracking the oldest absent node(s) when only 1/3 of the tracked nodes are present
final int cleanupNodeCount = (discoveryNodes.length - 2) / 3;
final DiscoveryNodes.Builder cleanupNodesBuilder = new DiscoveryNodes.Builder().add(masterNode)
diff --git a/server/src/test/java/org/elasticsearch/common/LocalTimeOffsetTests.java b/server/src/test/java/org/elasticsearch/common/LocalTimeOffsetTests.java
index 9711fb2c0f7fd..e3af6695bde1d 100644
--- a/server/src/test/java/org/elasticsearch/common/LocalTimeOffsetTests.java
+++ b/server/src/test/java/org/elasticsearch/common/LocalTimeOffsetTests.java
@@ -275,7 +275,7 @@ private static long time(String time, ZoneId zone) {
}
/**
- * The the last "fully defined" transitions in the provided {@linkplain ZoneId}.
+ * The last "fully defined" transition in the provided {@linkplain ZoneId}.
*/
private static ZoneOffsetTransition lastTransitionIn(ZoneId zone) {
List<ZoneOffsetTransition> transitions = zone.getRules().getTransitions();
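The JDK API used here is standard; for reference, a standalone example that fetches the explicitly listed transitions of a zone and takes the last one:
[source,java]
----
import java.time.ZoneId;
import java.time.zone.ZoneOffsetTransition;
import java.util.List;

public class LastTransitionExample {
    public static void main(String[] args) {
        ZoneId zone = ZoneId.of("America/New_York");
        // getTransitions() holds only the fully defined (explicitly listed) transitions;
        // recurring future rules live separately in getTransitionRules().
        List<ZoneOffsetTransition> transitions = zone.getRules().getTransitions();
        System.out.println(transitions.get(transitions.size() - 1));
    }
}
----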
diff --git a/server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerServiceTests.java b/server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerServiceTests.java
index 610e87b50d365..0a8fbcf6d56b9 100644
--- a/server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerServiceTests.java
+++ b/server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerServiceTests.java
@@ -747,7 +747,7 @@ public void testAllocationBucketsBreaker() {
// make sure used bytes is greater than the total circuit breaker limit
breaker.addWithoutBreaking(200);
- // make sure that we check on the the following call
+ // make sure that we check on the following call
for (int i = 0; i < 1023; i++) {
multiBucketConsumer.accept(0);
}
diff --git a/test/framework/src/main/java/org/elasticsearch/search/aggregations/bucket/AbstractNXYSignificanceHeuristicTestCase.java b/test/framework/src/main/java/org/elasticsearch/search/aggregations/bucket/AbstractNXYSignificanceHeuristicTestCase.java
index adb9c6f1e4ca0..5e82cb7edfeac 100644
--- a/test/framework/src/main/java/org/elasticsearch/search/aggregations/bucket/AbstractNXYSignificanceHeuristicTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/search/aggregations/bucket/AbstractNXYSignificanceHeuristicTestCase.java
@@ -27,7 +27,7 @@ protected SignificanceHeuristic getHeuristic() {
/**
* @param includeNegatives value for this test run, should the scores include negative values.
* @param backgroundIsSuperset value for this test run, indicates in NXY significant terms if the background is indeed
- * a superset of the the subset, or is instead a disjoint set
+ * a superset of the subset, or is instead a disjoint set
* @return A random instance of an NXY heuristic to test
*/
protected abstract SignificanceHeuristic getHeuristic(boolean includeNegatives, boolean backgroundIsSuperset);
diff --git a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/BlobCacheMetrics.java b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/BlobCacheMetrics.java
index 075621e8cdccb..a253b6bdd2360 100644
--- a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/BlobCacheMetrics.java
+++ b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/BlobCacheMetrics.java
@@ -64,7 +64,7 @@ public BlobCacheMetrics(MeterRegistry meterRegistry) {
),
meterRegistry.registerDoubleHistogram(
"es.blob_cache.population.throughput.histogram",
- "The throughput observed when populating the the cache",
+ "The throughput observed when populating the cache",
"MiB/second"
),
meterRegistry.registerLongCounter(
diff --git a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/async/AsyncTaskManagementServiceTests.java b/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/async/AsyncTaskManagementServiceTests.java
index 5361f1e8d1974..2e71c2c0a1ad3 100644
--- a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/async/AsyncTaskManagementServiceTests.java
+++ b/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/async/AsyncTaskManagementServiceTests.java
@@ -313,7 +313,7 @@ public void execute(TestRequest request, TestTask task, ActionListener<TestResponse> listener) {
StoredAsyncResponse<TestResponse> response = getResponse(responseHolder.get().id, TimeValue.ZERO);
if (success) {
assertThat(response.getException(), nullValue());
diff --git a/x-pack/plugin/ql/src/test/java/org/elasticsearch/xpack/ql/async/AsyncTaskManagementServiceTests.java b/x-pack/plugin/ql/src/test/java/org/elasticsearch/xpack/ql/async/AsyncTaskManagementServiceTests.java
index 65e5efbccb1af..018504785b5eb 100644
--- a/x-pack/plugin/ql/src/test/java/org/elasticsearch/xpack/ql/async/AsyncTaskManagementServiceTests.java
+++ b/x-pack/plugin/ql/src/test/java/org/elasticsearch/xpack/ql/async/AsyncTaskManagementServiceTests.java
@@ -313,7 +313,7 @@ public void execute(TestRequest request, TestTask task, ActionListener<TestResponse> listener) {
StoredAsyncResponse<TestResponse> response = getResponse(responseHolder.get().id, TimeValue.ZERO);
if (success) {
assertThat(response.getException(), nullValue());
diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupJobTaskTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupJobTaskTests.java
index 8b63b76cdf248..7d120e62e0260 100644
--- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupJobTaskTests.java
+++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupJobTaskTests.java
@@ -396,7 +396,7 @@ public void onFailure(Exception e) {
});
assertUnblockIn10s(latch2);
- // the the client answer
+ // the client answer
unblock.countDown();
}
}
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SourceGenerator.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SourceGenerator.java
index 0cbd7f1389188..207f92759fa34 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SourceGenerator.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SourceGenerator.java
@@ -70,7 +70,7 @@ public static SearchSourceBuilder sourceBuilder(QueryContainer container, QueryB
// set page size
if (size != null) {
int sz = container.limit() > 0 ? Math.min(container.limit(), size) : size;
- // now take into account the the minimum page (if set)
+ // now take into account the minimum page (if set)
// that is, return the multiple of the minimum page size closer to the set size
int minSize = container.minPageSize();
sz = minSize > 0 ? (Math.max(sz / minSize, 1) * minSize) : sz;
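To make the rounding concrete, the same expression with made-up numbers: a requested size of 95 and a minimum page of 30 rounds down to 90, the nearest multiple of the minimum page size that is never below one full page.
[source,java]
----
public class MinPageSizeExample {
    public static void main(String[] args) {
        int sz = 95;      // page size after applying the limit (made-up value)
        int minSize = 30; // hypothetical minimum page size
        // max(95 / 30, 1) * 30  ->  max(3, 1) * 30  ->  90
        sz = minSize > 0 ? (Math.max(sz / minSize, 1) * minSize) : sz;
        System.out.println(sz); // prints 90
    }
}
----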
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/MetricAggExtractor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/MetricAggExtractor.java
index f76702f5ffe5d..c12a172453941 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/MetricAggExtractor.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/MetricAggExtractor.java
@@ -139,7 +139,7 @@ private Object handleTargetType(Object object) {
return DateUtils.asDateTimeWithMillis(((Number) object).longValue(), zoneId);
} else if (dataType.isInteger()) {
// MIN and MAX need to return the same type as the field's, and SUM a long for integral types, but ES always returns them as
- // floating points -> convert them in the the SELECT pipeline, if needed
+ // floating points -> convert them in the SELECT pipeline, if needed
return convert(object, dataType);
}
}