diff --git a/.buildkite/pipelines/intake.yml b/.buildkite/pipelines/intake.yml
index 776b1ab944f69..bb3c75f10aaea 100644
--- a/.buildkite/pipelines/intake.yml
+++ b/.buildkite/pipelines/intake.yml
@@ -62,7 +62,7 @@ steps:
timeout_in_minutes: 300
matrix:
setup:
- BWC_VERSION: ["7.17.23", "8.14.4", "8.15.0", "8.16.0"]
+ BWC_VERSION: ["7.17.24", "8.15.1", "8.16.0"]
agents:
provider: gcp
image: family/elasticsearch-ubuntu-2004
diff --git a/.buildkite/pipelines/periodic-packaging.yml b/.buildkite/pipelines/periodic-packaging.yml
index e9c743885d78d..12729a9b6ebda 100644
--- a/.buildkite/pipelines/periodic-packaging.yml
+++ b/.buildkite/pipelines/periodic-packaging.yml
@@ -322,8 +322,8 @@ steps:
env:
BWC_VERSION: 7.16.3
- - label: "{{matrix.image}} / 7.17.23 / packaging-tests-upgrade"
- command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.17.23
+ - label: "{{matrix.image}} / 7.17.24 / packaging-tests-upgrade"
+ command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.17.24
timeout_in_minutes: 300
matrix:
setup:
@@ -337,7 +337,7 @@ steps:
buildDirectory: /dev/shm/bk
diskSizeGb: 250
env:
- BWC_VERSION: 7.17.23
+ BWC_VERSION: 7.17.24
- label: "{{matrix.image}} / 8.0.1 / packaging-tests-upgrade"
command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.0.1
@@ -577,8 +577,8 @@ steps:
env:
BWC_VERSION: 8.13.4
- - label: "{{matrix.image}} / 8.14.4 / packaging-tests-upgrade"
- command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.14.4
+ - label: "{{matrix.image}} / 8.14.3 / packaging-tests-upgrade"
+ command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.14.3
timeout_in_minutes: 300
matrix:
setup:
@@ -592,10 +592,10 @@ steps:
buildDirectory: /dev/shm/bk
diskSizeGb: 250
env:
- BWC_VERSION: 8.14.4
+ BWC_VERSION: 8.14.3
- - label: "{{matrix.image}} / 8.15.0 / packaging-tests-upgrade"
- command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.15.0
+ - label: "{{matrix.image}} / 8.15.1 / packaging-tests-upgrade"
+ command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.15.1
timeout_in_minutes: 300
matrix:
setup:
@@ -609,7 +609,7 @@ steps:
buildDirectory: /dev/shm/bk
diskSizeGb: 250
env:
- BWC_VERSION: 8.15.0
+ BWC_VERSION: 8.15.1
- label: "{{matrix.image}} / 8.16.0 / packaging-tests-upgrade"
command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.16.0
diff --git a/.buildkite/pipelines/periodic.yml b/.buildkite/pipelines/periodic.yml
index f908b946bb523..740fec13d1790 100644
--- a/.buildkite/pipelines/periodic.yml
+++ b/.buildkite/pipelines/periodic.yml
@@ -342,8 +342,8 @@ steps:
- signal_reason: agent_stop
limit: 3
- - label: 7.17.23 / bwc
- command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.17.23#bwcTest
+ - label: 7.17.24 / bwc
+ command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.17.24#bwcTest
timeout_in_minutes: 300
agents:
provider: gcp
@@ -353,7 +353,7 @@ steps:
preemptible: true
diskSizeGb: 250
env:
- BWC_VERSION: 7.17.23
+ BWC_VERSION: 7.17.24
retry:
automatic:
- exit_status: "-1"
@@ -642,8 +642,8 @@ steps:
- signal_reason: agent_stop
limit: 3
- - label: 8.14.4 / bwc
- command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.14.4#bwcTest
+ - label: 8.14.3 / bwc
+ command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.14.3#bwcTest
timeout_in_minutes: 300
agents:
provider: gcp
@@ -653,7 +653,7 @@ steps:
preemptible: true
diskSizeGb: 250
env:
- BWC_VERSION: 8.14.4
+ BWC_VERSION: 8.14.3
retry:
automatic:
- exit_status: "-1"
@@ -662,8 +662,8 @@ steps:
- signal_reason: agent_stop
limit: 3
- - label: 8.15.0 / bwc
- command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.15.0#bwcTest
+ - label: 8.15.1 / bwc
+ command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.15.1#bwcTest
timeout_in_minutes: 300
agents:
provider: gcp
@@ -673,7 +673,7 @@ steps:
preemptible: true
diskSizeGb: 250
env:
- BWC_VERSION: 8.15.0
+ BWC_VERSION: 8.15.1
retry:
automatic:
- exit_status: "-1"
@@ -771,7 +771,7 @@ steps:
setup:
ES_RUNTIME_JAVA:
- openjdk17
- BWC_VERSION: ["7.17.23", "8.14.4", "8.15.0", "8.16.0"]
+ BWC_VERSION: ["7.17.24", "8.15.1", "8.16.0"]
agents:
provider: gcp
image: family/elasticsearch-ubuntu-2004
@@ -821,7 +821,7 @@ steps:
- openjdk21
- openjdk22
- openjdk23
- BWC_VERSION: ["7.17.23", "8.14.4", "8.15.0", "8.16.0"]
+ BWC_VERSION: ["7.17.24", "8.15.1", "8.16.0"]
agents:
provider: gcp
image: family/elasticsearch-ubuntu-2004
diff --git a/.buildkite/scripts/gradle-build-cache-validation.sh b/.buildkite/scripts/gradle-build-cache-validation.sh
new file mode 100755
index 0000000000000..75dc9b264b8bc
--- /dev/null
+++ b/.buildkite/scripts/gradle-build-cache-validation.sh
@@ -0,0 +1,80 @@
+#!/bin/bash
+
+set -euo pipefail
+
+VALIDATION_SCRIPTS_VERSION=2.5.1
+GRADLE_ENTERPRISE_ACCESS_KEY=$(vault kv get -field=value secret/ci/elastic-elasticsearch/gradle-enterprise-api-key)
+export GRADLE_ENTERPRISE_ACCESS_KEY
+
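+# Download and unpack the Gradle Enterprise build-validation scripts that run the caching experiment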
+curl -s -L -O https://github.com/gradle/gradle-enterprise-build-validation-scripts/releases/download/v$VALIDATION_SCRIPTS_VERSION/gradle-enterprise-gradle-build-validation-$VALIDATION_SCRIPTS_VERSION.zip && unzip -q -o gradle-enterprise-gradle-build-validation-$VALIDATION_SCRIPTS_VERSION.zip
+
+# Create a temporary file
+tmpOutputFile=$(mktemp)
+trap "rm $tmpOutputFile" EXIT
+
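+# Temporarily disable exit-on-error so the experiment's exit code can be captured and handled below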
+set +e
+gradle-enterprise-gradle-build-validation/03-validate-local-build-caching-different-locations.sh -r https://github.com/elastic/elasticsearch.git -b $BUILDKITE_BRANCH --gradle-enterprise-server https://gradle-enterprise.elastic.co -t precommit --fail-if-not-fully-cacheable | tee $tmpOutputFile
+# Capture the return value
+retval=$?
+set -e
+
+# Now read the content from the temporary file into a variable
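+# (the sed pipelines slice out the relevant report sections and strip ANSI color escape sequences)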
+perfOutput=$(cat $tmpOutputFile | sed -n '/Performance Characteristics/,/See https:\/\/gradle.com\/bvs\/main\/Gradle.md#performance-characteristics for details./p' | sed '$d' | sed 's/\x1b\[[0-9;]*m//g')
+investigationOutput=$(cat $tmpOutputFile | sed -n '/Investigation Quick Links/,$p' | sed 's/\x1b\[[0-9;]*m//g')
+
+# Initialize HTML output variable
+summaryHtml="
Build Cache Performance Characteristics
"
+summaryHtml+=""
+
+# Process each line of the string
+while IFS=: read -r label value; do
+ if [[ -n "$label" && -n "$value" ]]; then
+ # Trim whitespace from label and value
+ trimmed_label=$(echo "$label" | xargs)
+ trimmed_value=$(echo "$value" | xargs)
+
+ # Append to HTML output variable
+ summaryHtml+="- $trimmed_label: $trimmed_value
"
+ fi
+done <<< "$perfOutput"
+
+summaryHtml+="
"
+
+# generate html for links
+summaryHtml+="Investigation Links
"
+summaryHtml+=""
+
+# Process each line of the string
+while IFS= read -r line; do
+ if [[ "$line" =~ http.* ]]; then
+ # Extract URL and description using awk
+ url=$(echo "$line" | awk '{print $NF}')
+ description=$(echo "$line" | sed -e "s/:.*//")
+
+ # Append to HTML output variable
+ summaryHtml+=" - $description
"
+ fi
+done <<< "$investigationOutput"
+
+# End of the HTML content
+summaryHtml+="
"
+
+cat << EOF | buildkite-agent annotate --context "ctx-validation-summary" --style "info"
+$summaryHtml
+EOF
+
+# Check if the command was successful
+if [ $retval -eq 0 ]; then
+ echo "Experiment completed successfully"
+elif [ $retval -eq 1 ]; then
+ echo "An invalid input was provided while attempting to run the experiment"
+elif [ $retval -eq 2 ]; then
+ echo "One of the builds that is part of the experiment failed"
+elif [ $retval -eq 3 ]; then
+ echo "The build was not fully cacheable for the given task graph"
+else
+ echo "An unclassified, fatal error happened while running the experiment"
+fi
+
+exit $retval
+
diff --git a/.buildkite/scripts/gradle-cache-validation.sh b/.buildkite/scripts/gradle-cache-validation.sh
deleted file mode 100755
index fbb957bc3b26b..0000000000000
--- a/.buildkite/scripts/gradle-cache-validation.sh
+++ /dev/null
@@ -1,79 +0,0 @@
-#!/bin/bash
-
-set -euo pipefail
-
-VALIDATION_SCRIPTS_VERSION=2.5.1
-GRADLE_ENTERPRISE_ACCESS_KEY=$(vault kv get -field=value secret/ci/elastic-elasticsearch/gradle-enterprise-api-key)
-export GRADLE_ENTERPRISE_ACCESS_KEY
-
-curl -s -L -O https://github.com/gradle/gradle-enterprise-build-validation-scripts/releases/download/v$VALIDATION_SCRIPTS_VERSION/gradle-enterprise-gradle-build-validation-$VALIDATION_SCRIPTS_VERSION.zip && unzip -q -o gradle-enterprise-gradle-build-validation-$VALIDATION_SCRIPTS_VERSION.zip
-
-# Create a temporary file
-tmpOutputFile=$(mktemp)
-trap "rm $tmpOutputFile" EXIT
-
-gradle-enterprise-gradle-build-validation/03-validate-local-build-caching-different-locations.sh -r https://github.com/elastic/elasticsearch.git -b $BUILDKITE_BRANCH --gradle-enterprise-server https://gradle-enterprise.elastic.co -t precommit --fail-if-not-fully-cacheable | tee $tmpOutputFile
-
-# Capture the return value
-retval=$?
-
-# Now read the content from the temporary file into a variable
-perfOutput=$(cat $tmpOutputFile | sed -n '/Performance Characteristics/,/See https:\/\/gradle.com\/bvs\/main\/Gradle.md#performance-characteristics for details./p' | sed '$d' | sed 's/\x1b\[[0-9;]*m//g')
-investigationOutput=$(cat $tmpOutputFile | sed -n '/Investigation Quick Links/,$p' | sed 's/\x1b\[[0-9;]*m//g')
-
-# Initialize HTML output variable
-summaryHtml="Performance Characteristics
"
-summaryHtml+=""
-
-# Process each line of the string
-while IFS=: read -r label value; do
- if [[ -n "$label" && -n "$value" ]]; then
- # Trim whitespace from label and value
- trimmed_label=$(echo "$label" | xargs)
- trimmed_value=$(echo "$value" | xargs)
-
- # Append to HTML output variable
- summaryHtml+="- $trimmed_label: $trimmed_value
"
- fi
-done <<< "$perfOutput"
-
-summaryHtml+="
"
-
-# generate html for links
-summaryHtml+="Investigation Links
"
-summaryHtml+=""
-
-# Process each line of the string
-while IFS= read -r line; do
- if [[ "$line" =~ http.* ]]; then
- # Extract URL and description using awk
- url=$(echo "$line" | awk '{print $NF}')
- description=$(echo "$line" | sed -e "s/:.*//")
-
- # Append to HTML output variable
- summaryHtml+=" - $description
"
- fi
-done <<< "$investigationOutput"
-
-# End of the HTML content
-summaryHtml+="
"
-
-cat << EOF | buildkite-agent annotate --context "ctx-validation-summary" --style "info"
-$summaryHtml
-EOF
-
-# Check if the command was successful
-if [ $retval -eq 0 ]; then
- echo "Experiment completed successfully"
-elif [ $retval -eq 1 ]; then
- echo "An invalid input was provided while attempting to run the experiment"
-elif [ $retval -eq 2 ]; then
- echo "One of the builds that is part of the experiment failed"
-elif [ $retval -eq 3 ]; then
- echo "The build was not fully cacheable for the given task graph"
-elif [ $retval -eq 3 ]; then
- echo "An unclassified, fatal error happened while running the experiment"
-fi
-
-exit $retval
-
diff --git a/.buildkite/scripts/gradle-configuration-cache-validation.sh b/.buildkite/scripts/gradle-configuration-cache-validation.sh
new file mode 100755
index 0000000000000..8249155c5ffc5
--- /dev/null
+++ b/.buildkite/scripts/gradle-configuration-cache-validation.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+
+set -euo pipefail
+
+# TODO/FIXME: without a fully resolved Gradle home, we see issues with configuration-cache reuse
+./gradlew --max-workers=8 --parallel --scan --no-daemon precommit
+
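+# First run with --configuration-cache stores a new cache entry; file-system checks on build/*.tar.bz2 are ignored so transient archives don't invalidate it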
+./gradlew --max-workers=8 --parallel --scan --configuration-cache precommit -Dorg.gradle.configuration-cache.inputs.unsafe.ignore.file-system-checks=build/*.tar.bz2
+
+# Create a temporary file
+tmpOutputFile=$(mktemp)
+trap "rm $tmpOutputFile" EXIT
+
+echo "2nd run"
+# TODO run-gradle.sh script causes issues because of init script handling
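+# A second run with unchanged inputs should report "Configuration cache entry reused.", which is asserted below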
+./gradlew --max-workers=8 --parallel --scan --configuration-cache precommit -Dorg.gradle.configuration-cache.inputs.unsafe.ignore.file-system-checks=build/*.tar.bz2 | tee $tmpOutputFile
+
+# Check if the command was successful
+if grep -q "Configuration cache entry reused." $tmpOutputFile; then
+ echo "Gradle configuration cache reused"
+ exit 0
+else
+ echo "Failed to reuse Gradle configuration cache."
+ exit 1
+fi
+
+
diff --git a/.buildkite/scripts/lucene-snapshot/update-branch.sh b/.buildkite/scripts/lucene-snapshot/update-branch.sh
index d02123f3236e7..6a2d1e3df05f7 100755
--- a/.buildkite/scripts/lucene-snapshot/update-branch.sh
+++ b/.buildkite/scripts/lucene-snapshot/update-branch.sh
@@ -2,17 +2,17 @@
set -euo pipefail
-if [[ "$BUILDKITE_BRANCH" != "lucene_snapshot" ]]; then
- echo "Error: This script should only be run on the lucene_snapshot branch"
+if [[ "$BUILDKITE_BRANCH" != "lucene_snapshot"* ]]; then
+ echo "Error: This script should only be run on lucene_snapshot branches"
exit 1
fi
-echo --- Updating lucene_snapshot branch with main
+echo --- Updating "$BUILDKITE_BRANCH" branch with main
git config --global user.name elasticsearchmachine
git config --global user.email 'infra-root+elasticsearchmachine@elastic.co'
-git checkout lucene_snapshot
+git checkout "$BUILDKITE_BRANCH"
git fetch origin main
git merge --no-edit origin/main
-git push origin lucene_snapshot
+git push origin "$BUILDKITE_BRANCH"
diff --git a/.buildkite/scripts/lucene-snapshot/update-es-snapshot.sh b/.buildkite/scripts/lucene-snapshot/update-es-snapshot.sh
index 75f42a32cb590..7bec83d055139 100755
--- a/.buildkite/scripts/lucene-snapshot/update-es-snapshot.sh
+++ b/.buildkite/scripts/lucene-snapshot/update-es-snapshot.sh
@@ -2,8 +2,8 @@
set -euo pipefail
-if [[ "$BUILDKITE_BRANCH" != "lucene_snapshot" ]]; then
- echo "Error: This script should only be run on the lucene_snapshot branch"
+if [[ "$BUILDKITE_BRANCH" != "lucene_snapshot"* ]]; then
+ echo "Error: This script should only be run on the lucene_snapshot branches"
exit 1
fi
diff --git a/.ci/bwcVersions b/.ci/bwcVersions
index 776be80e0d291..e43b3333dd755 100644
--- a/.ci/bwcVersions
+++ b/.ci/bwcVersions
@@ -16,7 +16,7 @@ BWC_VERSION:
- "7.14.2"
- "7.15.2"
- "7.16.3"
- - "7.17.23"
+ - "7.17.24"
- "8.0.1"
- "8.1.3"
- "8.2.3"
@@ -31,6 +31,6 @@ BWC_VERSION:
- "8.11.4"
- "8.12.2"
- "8.13.4"
- - "8.14.4"
- - "8.15.0"
+ - "8.14.3"
+ - "8.15.1"
- "8.16.0"
diff --git a/.ci/snapshotBwcVersions b/.ci/snapshotBwcVersions
index f5f7f7a7d4ecb..2eea118e57e2a 100644
--- a/.ci/snapshotBwcVersions
+++ b/.ci/snapshotBwcVersions
@@ -1,5 +1,4 @@
BWC_VERSION:
- - "7.17.23"
- - "8.14.4"
- - "8.15.0"
+ - "7.17.24"
+ - "8.15.1"
- "8.16.0"
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 0f7e3073ed022..5b98444c044d2 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -27,8 +27,12 @@ libs/logstash-bridge @elastic/logstash
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java @elastic/kibana-security
# APM Data index templates, etc.
-x-pack/plugin/apm-data/src/main/resources @elastic/apm-server
-x-pack/plugin/apm-data/src/yamlRestTest/resources @elastic/apm-server
+x-pack/plugin/apm-data/src/main/resources @elastic/obs-ds-intake-services
+x-pack/plugin/apm-data/src/yamlRestTest/resources @elastic/obs-ds-intake-services
+
+# OTel
+x-pack/plugin/otel-data/src/main/resources @elastic/obs-ds-intake-services
+x-pack/plugin/otel-data/src/yamlRestTest/resources @elastic/obs-ds-intake-services
# Delivery
gradle @elastic/es-delivery
diff --git a/README.asciidoc b/README.asciidoc
index dc27735d3c015..c1945e56b025b 100644
--- a/README.asciidoc
+++ b/README.asciidoc
@@ -1,6 +1,6 @@
= Elasticsearch
-Elasticsearch is a distributed search and analytics engine optimized for speed and relevance on production-scale workloads. Elasticsearch is the foundation of Elastic's open Stack platform. Search in near real-time over massive datasets, perform vector searches, integrate with generative AI applications, and much more.
+Elasticsearch is a distributed search and analytics engine, scalable data store and vector database optimized for speed and relevance on production-scale workloads. Elasticsearch is the foundation of Elastic's open Stack platform. Search in near real-time over massive datasets, perform vector searches, integrate with generative AI applications, and much more.
Use cases enabled by Elasticsearch include:
@@ -33,76 +33,144 @@ https://www.elastic.co/downloads/elasticsearch[elastic.co/downloads/elasticsearc
=== Run Elasticsearch locally
////
-IMPORTANT: This content is replicated in the Elasticsearch guide.
-If you make changes, you must also update setup/set-up-local-dev-deployment.asciidoc.
+IMPORTANT: This content is replicated in the Elasticsearch guide. See `run-elasticsearch-locally.asciidoc`.
+Both will soon be replaced by a quickstart script.
////
-To try out Elasticsearch on your own machine, we recommend using Docker
-and running both Elasticsearch and Kibana.
-Docker images are available from the https://www.docker.elastic.co[Elastic Docker registry].
+[WARNING]
+====
+DO NOT USE THESE INSTRUCTIONS FOR PRODUCTION DEPLOYMENTS.
-NOTE: Starting in Elasticsearch 8.0, security is enabled by default.
-The first time you start Elasticsearch, TLS encryption is configured automatically,
-a password is generated for the `elastic` user,
-and a Kibana enrollment token is created so you can connect Kibana to your secured cluster.
+This setup is intended for local development and testing only.
+====
-For other installation options, see the
-https://www.elastic.co/guide/en/elasticsearch/reference/current/install-elasticsearch.html[Elasticsearch installation documentation].
+The following commands help you very quickly spin up a single-node Elasticsearch cluster, together with Kibana in Docker.
+Use this setup for local development or testing.
-**Start Elasticsearch**
+==== Prerequisites
-. Install and start https://www.docker.com/products/docker-desktop[Docker
-Desktop]. Go to **Preferences > Resources > Advanced** and set Memory to at least 4GB.
+If you don't have Docker installed, https://www.docker.com/products/docker-desktop[download and install Docker Desktop] for your operating system.
-. Start an Elasticsearch container:
-+
+==== Set environment variables
+
+Configure the following environment variables.
+
+[source,sh]
+----
+export ELASTIC_PASSWORD="" # password for "elastic" username
+export KIBANA_PASSWORD="" # Used internally by Kibana, must be at least 6 characters long
+----
+
+==== Create a Docker network
+
+To run both Elasticsearch and Kibana, you'll need to create a Docker network:
+
+[source,sh]
----
-docker network create elastic
-docker pull docker.elastic.co/elasticsearch/elasticsearch:{version} <1>
-docker run --name elasticsearch --net elastic -p 9200:9200 -p 9300:9300 -e "discovery.type=single-node" -t docker.elastic.co/elasticsearch/elasticsearch:{version}
+docker network create elastic-net
----
-<1> Replace {version} with the version of Elasticsearch you want to run.
-+
-When you start Elasticsearch for the first time, the generated `elastic` user password and
-Kibana enrollment token are output to the terminal.
-+
-NOTE: You might need to scroll back a bit in the terminal to view the password
-and enrollment token.
-. Copy the generated password and enrollment token and save them in a secure
-location. These values are shown only when you start Elasticsearch for the first time.
-You'll use these to enroll Kibana with your Elasticsearch cluster and log in.
+==== Run Elasticsearch
+
+Start the Elasticsearch container with the following command:
-**Start Kibana**
+[source,sh]
+----
+docker run -p 127.0.0.1:9200:9200 -d --name elasticsearch --network elastic-net \
+ -e ELASTIC_PASSWORD=$ELASTIC_PASSWORD \
+ -e "discovery.type=single-node" \
+ -e "xpack.security.http.ssl.enabled=false" \
+ -e "xpack.license.self_generated.type=trial" \
+ docker.elastic.co/elasticsearch/elasticsearch:{version}
+----
-Kibana enables you to easily send requests to Elasticsearch and analyze, visualize, and manage data interactively.
+==== Run Kibana (optional)
-. In a new terminal session, start Kibana and connect it to your Elasticsearch container:
-+
+To run Kibana, you must first set the `kibana_system` password in the Elasticsearch container.
+
+[source,sh]
----
-docker pull docker.elastic.co/kibana/kibana:{version} <1>
-docker run --name kibana --net elastic -p 5601:5601 docker.elastic.co/kibana/kibana:{version}
+# configure the Kibana password in the ES container
+curl -u elastic:$ELASTIC_PASSWORD \
+ -X POST \
+ http://localhost:9200/_security/user/kibana_system/_password \
+ -d '{"password":"'"$KIBANA_PASSWORD"'"}' \
+ -H 'Content-Type: application/json'
----
-<1> Replace {version} with the version of Kibana you want to run.
-+
-When you start Kibana, a unique URL is output to your terminal.
+// NOTCONSOLE
-. To access Kibana, open the generated URL in your browser.
+Start the Kibana container with the following command:
- .. Paste the enrollment token that you copied when starting
- Elasticsearch and click the button to connect your Kibana instance with Elasticsearch.
+[source,sh]
+----
+docker run -p 127.0.0.1:5601:5601 -d --name kibana --network elastic-net \
+ -e ELASTICSEARCH_URL=http://elasticsearch:9200 \
+ -e ELASTICSEARCH_HOSTS=http://elasticsearch:9200 \
+ -e ELASTICSEARCH_USERNAME=kibana_system \
+ -e ELASTICSEARCH_PASSWORD=$KIBANA_PASSWORD \
+ -e "xpack.security.enabled=false" \
+ -e "xpack.license.self_generated.type=trial" \
+ docker.elastic.co/kibana/kibana:{version}
+----
- .. Log in to Kibana as the `elastic` user with the password that was generated
- when you started Elasticsearch.
+.Trial license
+[%collapsible]
+====
+The service is started with a trial license. The trial license enables all features of Elasticsearch for a trial period of 30 days. After the trial period expires, the license is downgraded to a basic license, which is free forever. If you prefer to skip the trial and use the basic license, set the value of the `xpack.license.self_generated.type` variable to basic instead. For a detailed feature comparison between the different licenses, refer to our https://www.elastic.co/subscriptions[subscriptions page].
+====
-**Send requests to Elasticsearch**
+==== Send requests to Elasticsearch
You send data and other requests to Elasticsearch through REST APIs.
You can interact with Elasticsearch using any client that sends HTTP requests,
such as the https://www.elastic.co/guide/en/elasticsearch/client/index.html[Elasticsearch
language clients] and https://curl.se[curl].
+
+===== Using curl
+
+Here's an example curl command to create a new Elasticsearch index, using basic auth:
+
+[source,sh]
+----
+curl -u elastic:$ELASTIC_PASSWORD \
+ -X PUT \
+ http://localhost:9200/my-new-index \
+ -H 'Content-Type: application/json'
+----
+// NOTCONSOLE
+
+===== Using a language client
+
+To connect to your local dev Elasticsearch cluster with a language client, you can use basic authentication with the `elastic` username and the password you set in the environment variable.
+
+You'll use the following connection details:
+
+* **Elasticsearch endpoint**: `http://localhost:9200`
+* **Username**: `elastic`
+* **Password**: `$ELASTIC_PASSWORD` (Value you set in the environment variable)
+
+For example, to connect with the Python `elasticsearch` client:
+
+[source,python]
+----
+import os
+from elasticsearch import Elasticsearch
+
+username = 'elastic'
+password = os.getenv('ELASTIC_PASSWORD') # Value you set in the environment variable
+
+client = Elasticsearch(
+ "http://localhost:9200",
+ basic_auth=(username, password)
+)
+
+print(client.info())
+----
+
+===== Using the Dev Tools Console
+
Kibana's developer console provides an easy way to experiment and test requests.
-To access the console, go to **Management > Dev Tools**.
+To access the console, open Kibana, then go to **Management** > **Dev Tools**.
**Add data**
diff --git a/benchmarks/build.gradle b/benchmarks/build.gradle
index 49e81a67e85f9..b16621aaaa471 100644
--- a/benchmarks/build.gradle
+++ b/benchmarks/build.gradle
@@ -1,4 +1,5 @@
import org.elasticsearch.gradle.internal.info.BuildParams
+import org.elasticsearch.gradle.internal.test.TestUtil
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
@@ -29,6 +30,7 @@ tasks.named("javadoc").configure { enabled = false }
configurations {
expression
painless
+ nativeLib
}
dependencies {
@@ -37,6 +39,7 @@ dependencies {
// us to invoke the JMH uberjar as usual.
exclude group: 'net.sf.jopt-simple', module: 'jopt-simple'
}
+ api(project(':libs:elasticsearch-h3'))
api(project(':modules:aggregations'))
api(project(':x-pack:plugin:esql-core'))
api(project(':x-pack:plugin:esql'))
@@ -44,6 +47,7 @@ dependencies {
implementation project(path: ':libs:elasticsearch-simdvec')
expression(project(path: ':modules:lang-expression', configuration: 'zip'))
painless(project(path: ':modules:lang-painless', configuration: 'zip'))
+ nativeLib(project(':libs:elasticsearch-native'))
api "org.openjdk.jmh:jmh-core:$versions.jmh"
annotationProcessor "org.openjdk.jmh:jmh-generator-annprocess:$versions.jmh"
// Dependencies of JMH
@@ -75,17 +79,8 @@ tasks.register("copyPainless", Copy) {
tasks.named("run").configure {
executable = "${BuildParams.runtimeJavaHome}/bin/java"
args << "-Dplugins.dir=${buildDir}/plugins" << "-Dtests.index=${buildDir}/index"
- dependsOn "copyExpression", "copyPainless"
- systemProperty 'java.library.path', file("../libs/native/libraries/build/platform/${platformName()}-${os.arch}")
-}
-
-String platformName() {
- String name = System.getProperty("os.name");
- if (name.startsWith("Mac")) {
- return "darwin";
- } else {
- return name.toLowerCase(Locale.ROOT);
- }
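+  // resolve the bundled native libraries and point the benchmark JVM at them via the es.nativelibs.path system property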
+ dependsOn "copyExpression", "copyPainless", configurations.nativeLib
+ systemProperty 'es.nativelibs.path', TestUtil.getTestLibraryPath(file("../libs/native/libraries/build/platform/").toString())
}
spotless {
diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/BlockBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/BlockBenchmark.java
index 49603043e7bcc..59fdfff3025a1 100644
--- a/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/BlockBenchmark.java
+++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/BlockBenchmark.java
@@ -20,13 +20,10 @@
import org.elasticsearch.compute.data.BlockFactory;
import org.elasticsearch.compute.data.BooleanBigArrayBlock;
import org.elasticsearch.compute.data.BooleanBigArrayVector;
-import org.elasticsearch.compute.data.BooleanBlock;
import org.elasticsearch.compute.data.BooleanVector;
-import org.elasticsearch.compute.data.BytesRefBlock;
import org.elasticsearch.compute.data.BytesRefVector;
import org.elasticsearch.compute.data.DoubleBigArrayBlock;
import org.elasticsearch.compute.data.DoubleBigArrayVector;
-import org.elasticsearch.compute.data.DoubleBlock;
import org.elasticsearch.compute.data.DoubleVector;
import org.elasticsearch.compute.data.IntBigArrayBlock;
import org.elasticsearch.compute.data.IntBigArrayVector;
@@ -34,39 +31,13 @@
import org.elasticsearch.compute.data.IntVector;
import org.elasticsearch.compute.data.LongBigArrayBlock;
import org.elasticsearch.compute.data.LongBigArrayVector;
-import org.elasticsearch.compute.data.LongBlock;
import org.elasticsearch.compute.data.LongVector;
-import org.openjdk.jmh.annotations.Benchmark;
-import org.openjdk.jmh.annotations.BenchmarkMode;
-import org.openjdk.jmh.annotations.Fork;
-import org.openjdk.jmh.annotations.Level;
-import org.openjdk.jmh.annotations.Measurement;
-import org.openjdk.jmh.annotations.Mode;
-import org.openjdk.jmh.annotations.OperationsPerInvocation;
-import org.openjdk.jmh.annotations.OutputTimeUnit;
-import org.openjdk.jmh.annotations.Param;
-import org.openjdk.jmh.annotations.Scope;
-import org.openjdk.jmh.annotations.Setup;
-import org.openjdk.jmh.annotations.State;
-import org.openjdk.jmh.annotations.TearDown;
-import org.openjdk.jmh.annotations.Warmup;
import java.util.ArrayList;
import java.util.BitSet;
-import java.util.Collections;
-import java.util.List;
import java.util.Random;
-import java.util.concurrent.TimeUnit;
-import java.util.stream.IntStream;
-
-@Warmup(iterations = 5)
-@Measurement(iterations = 7)
-@BenchmarkMode(Mode.AverageTime)
-@OutputTimeUnit(TimeUnit.NANOSECONDS)
-@State(Scope.Thread)
-@Fork(1)
-public class BlockBenchmark {
+public class BlockBenchmark {
/**
* All data type/block kind combinations to be loaded before the benchmark.
* It is important to be exhaustive here so that all implementers of {@link IntBlock#getInt(int)} are actually loaded when we benchmark
@@ -114,35 +85,12 @@ public class BlockBenchmark {
private static final int MAX_MV_ELEMENTS = 100;
private static final int MAX_BYTES_REF_LENGTH = 255;
- private static final Random random = new Random();
-
- private static final BlockFactory blockFactory = BlockFactory.getInstance(
- new NoopCircuitBreaker("noop"),
- BigArrays.NON_RECYCLING_INSTANCE
- );
-
- static {
- // Smoke test all the expected values and force loading subclasses more like prod
- int totalPositions = 10;
- long[] actualCheckSums = new long[NUM_BLOCKS_PER_ITERATION];
-
- for (String paramString : RELEVANT_TYPE_BLOCK_COMBINATIONS) {
- String[] params = paramString.split("/");
- String dataType = params[0];
- String blockKind = params[1];
-
- BenchmarkBlocks data = buildBlocks(dataType, blockKind, totalPositions);
- int[][] traversalOrders = createTraversalOrders(data.blocks, false);
- run(dataType, data, traversalOrders, actualCheckSums);
- assertCheckSums(data, actualCheckSums);
- }
- }
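+    // Shared state and helpers for the BlockReadBenchmark and BlockKeepMaskBenchmark subclasses; the JMH annotations and smoke tests moved there.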
+ static final Random random = new Random();
- private record BenchmarkBlocks(Block[] blocks, long[] checkSums) {};
+ static final BlockFactory blockFactory = BlockFactory.getInstance(new NoopCircuitBreaker("noop"), BigArrays.NON_RECYCLING_INSTANCE);
- private static BenchmarkBlocks buildBlocks(String dataType, String blockKind, int totalPositions) {
+ static Block[] buildBlocks(String dataType, String blockKind, int totalPositions) {
Block[] blocks = new Block[NUM_BLOCKS_PER_ITERATION];
- long[] checkSums = new long[NUM_BLOCKS_PER_ITERATION];
switch (dataType) {
case "boolean" -> {
@@ -237,11 +185,6 @@ private static BenchmarkBlocks buildBlocks(String dataType, String blockKind, in
}
}
}
-
- for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) {
- BooleanBlock block = (BooleanBlock) blocks[blockIndex];
- checkSums[blockIndex] = computeBooleanCheckSum(block, IntStream.range(0, block.getPositionCount()).toArray());
- }
}
case "BytesRef" -> {
for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) {
@@ -294,11 +237,6 @@ private static BenchmarkBlocks buildBlocks(String dataType, String blockKind, in
}
}
}
-
- for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) {
- BytesRefBlock block = (BytesRefBlock) blocks[blockIndex];
- checkSums[blockIndex] = computeBytesRefCheckSum(block, IntStream.range(0, block.getPositionCount()).toArray());
- }
}
case "double" -> {
for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) {
@@ -386,11 +324,6 @@ private static BenchmarkBlocks buildBlocks(String dataType, String blockKind, in
}
}
}
-
- for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) {
- DoubleBlock block = (DoubleBlock) blocks[blockIndex];
- checkSums[blockIndex] = computeDoubleCheckSum(block, IntStream.range(0, block.getPositionCount()).toArray());
- }
}
case "int" -> {
for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) {
@@ -478,11 +411,6 @@ private static BenchmarkBlocks buildBlocks(String dataType, String blockKind, in
}
}
}
-
- for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) {
- IntBlock block = (IntBlock) blocks[blockIndex];
- checkSums[blockIndex] = computeIntCheckSum(block, IntStream.range(0, block.getPositionCount()).toArray());
- }
}
case "long" -> {
for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) {
@@ -570,36 +498,12 @@ private static BenchmarkBlocks buildBlocks(String dataType, String blockKind, in
}
}
}
-
- for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) {
- LongBlock block = (LongBlock) blocks[blockIndex];
- checkSums[blockIndex] = computeLongCheckSum(block, IntStream.range(0, block.getPositionCount()).toArray());
- }
}
default -> {
throw new IllegalStateException("illegal data type [" + dataType + "]");
}
}
-
- return new BenchmarkBlocks(blocks, checkSums);
- }
-
- private static int[][] createTraversalOrders(Block[] blocks, boolean randomized) {
- int[][] orders = new int[blocks.length][];
-
- for (int i = 0; i < blocks.length; i++) {
- IntStream positionsStream = IntStream.range(0, blocks[i].getPositionCount());
-
- if (randomized) {
-            List<Integer> positions = new java.util.ArrayList<>(positionsStream.boxed().toList());
- Collections.shuffle(positions, random);
- orders[i] = positions.stream().mapToInt(x -> x).toArray();
- } else {
- orders[i] = positionsStream.toArray();
- }
- }
-
- return orders;
+ return blocks;
}
private static int[] randomFirstValueIndexes(int totalPositions) {
@@ -631,220 +535,4 @@ private static BitSet randomNulls(int positionCount) {
return nulls;
}
-
- private static void run(String dataType, BenchmarkBlocks data, int[][] traversalOrders, long[] resultCheckSums) {
- switch (dataType) {
- case "boolean" -> {
- for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) {
- BooleanBlock block = (BooleanBlock) data.blocks[blockIndex];
-
- resultCheckSums[blockIndex] = computeBooleanCheckSum(block, traversalOrders[blockIndex]);
- }
- }
- case "BytesRef" -> {
- for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) {
- BytesRefBlock block = (BytesRefBlock) data.blocks[blockIndex];
-
- resultCheckSums[blockIndex] = computeBytesRefCheckSum(block, traversalOrders[blockIndex]);
- }
- }
- case "double" -> {
- for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) {
- DoubleBlock block = (DoubleBlock) data.blocks[blockIndex];
-
- resultCheckSums[blockIndex] = computeDoubleCheckSum(block, traversalOrders[blockIndex]);
- }
- }
- case "int" -> {
- for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) {
- IntBlock block = (IntBlock) data.blocks[blockIndex];
-
- resultCheckSums[blockIndex] = computeIntCheckSum(block, traversalOrders[blockIndex]);
- }
- }
- case "long" -> {
- for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) {
- LongBlock block = (LongBlock) data.blocks[blockIndex];
-
- resultCheckSums[blockIndex] = computeLongCheckSum(block, traversalOrders[blockIndex]);
- }
- }
- default -> {
- throw new IllegalStateException();
- }
- }
- }
-
- private static void assertCheckSums(BenchmarkBlocks data, long[] actualCheckSums) {
- for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) {
- if (actualCheckSums[blockIndex] != data.checkSums[blockIndex]) {
- throw new AssertionError("checksums do not match for block [" + blockIndex + "]");
- }
- }
- }
-
- private static long computeBooleanCheckSum(BooleanBlock block, int[] traversalOrder) {
- long sum = 0;
-
- for (int position : traversalOrder) {
- if (block.isNull(position)) {
- continue;
- }
- int start = block.getFirstValueIndex(position);
- int end = start + block.getValueCount(position);
- for (int i = start; i < end; i++) {
- sum += block.getBoolean(i) ? 1 : 0;
- }
- }
-
- return sum;
- }
-
- private static long computeBytesRefCheckSum(BytesRefBlock block, int[] traversalOrder) {
- long sum = 0;
- BytesRef currentValue = new BytesRef();
-
- for (int position : traversalOrder) {
- if (block.isNull(position)) {
- continue;
- }
- int start = block.getFirstValueIndex(position);
- int end = start + block.getValueCount(position);
- for (int i = start; i < end; i++) {
- block.getBytesRef(i, currentValue);
- sum += currentValue.length > 0 ? currentValue.bytes[0] : 0;
- }
- }
-
- return sum;
- }
-
- private static long computeDoubleCheckSum(DoubleBlock block, int[] traversalOrder) {
- long sum = 0;
-
- for (int position : traversalOrder) {
- if (block.isNull(position)) {
- continue;
- }
- int start = block.getFirstValueIndex(position);
- int end = start + block.getValueCount(position);
- for (int i = start; i < end; i++) {
- // Use an operation that is not affected by rounding errors. Otherwise, the result may depend on the traversalOrder.
- sum += (long) block.getDouble(i);
- }
- }
-
- return sum;
- }
-
- private static long computeIntCheckSum(IntBlock block, int[] traversalOrder) {
- int sum = 0;
-
- for (int position : traversalOrder) {
- if (block.isNull(position)) {
- continue;
- }
- int start = block.getFirstValueIndex(position);
- int end = start + block.getValueCount(position);
- for (int i = start; i < end; i++) {
- sum += block.getInt(i);
- }
- }
-
- return sum;
- }
-
- private static long computeLongCheckSum(LongBlock block, int[] traversalOrder) {
- long sum = 0;
-
- for (int position : traversalOrder) {
- if (block.isNull(position)) {
- continue;
- }
- int start = block.getFirstValueIndex(position);
- int end = start + block.getValueCount(position);
- for (int i = start; i < end; i++) {
- sum += block.getLong(i);
- }
- }
-
- return sum;
- }
-
- private static boolean isRandom(String accessType) {
- return accessType.equalsIgnoreCase("random");
- }
-
- /**
- * Must be a subset of {@link BlockBenchmark#RELEVANT_TYPE_BLOCK_COMBINATIONS}
- */
- @Param(
- {
- "boolean/array",
- "boolean/array-multivalue-null",
- "boolean/big-array",
- "boolean/big-array-multivalue-null",
- "boolean/vector",
- "boolean/vector-big-array",
- "boolean/vector-const",
- "BytesRef/array",
- "BytesRef/array-multivalue-null",
- "BytesRef/vector",
- "BytesRef/vector-const",
- "double/array",
- "double/array-multivalue-null",
- "double/big-array",
- "double/big-array-multivalue-null",
- "double/vector",
- "double/vector-big-array",
- "double/vector-const",
- "int/array",
- "int/array-multivalue-null",
- "int/big-array",
- "int/big-array-multivalue-null",
- "int/vector",
- "int/vector-big-array",
- "int/vector-const",
- "long/array",
- "long/array-multivalue-null",
- "long/big-array",
- "long/big-array-multivalue-null",
- "long/vector",
- "long/vector-big-array",
- "long/vector-const" }
- )
- public String dataTypeAndBlockKind;
-
- @Param({ "sequential", "random" })
- public String accessType;
-
- private BenchmarkBlocks data;
-
- private int[][] traversalOrders;
-
- private final long[] actualCheckSums = new long[NUM_BLOCKS_PER_ITERATION];
-
- @Setup
- public void setup() {
- String[] params = dataTypeAndBlockKind.split("/");
- String dataType = params[0];
- String blockKind = params[1];
-
- data = buildBlocks(dataType, blockKind, BLOCK_TOTAL_POSITIONS);
- traversalOrders = createTraversalOrders(data.blocks, isRandom(accessType));
- }
-
- @Benchmark
- @OperationsPerInvocation(NUM_BLOCKS_PER_ITERATION * BLOCK_TOTAL_POSITIONS)
- public void run() {
- String[] params = dataTypeAndBlockKind.split("/");
- String dataType = params[0];
-
- run(dataType, data, traversalOrders, actualCheckSums);
- }
-
- @TearDown(Level.Iteration)
- public void assertCheckSums() {
- assertCheckSums(data, actualCheckSums);
- }
}
diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/BlockKeepMaskBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/BlockKeepMaskBenchmark.java
new file mode 100644
index 0000000000000..23048ad188a37
--- /dev/null
+++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/BlockKeepMaskBenchmark.java
@@ -0,0 +1,295 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.benchmark.compute.operator;
+
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.compute.data.Block;
+import org.elasticsearch.compute.data.BooleanBlock;
+import org.elasticsearch.compute.data.BooleanVector;
+import org.elasticsearch.compute.data.BytesRefBlock;
+import org.elasticsearch.compute.data.DoubleBlock;
+import org.elasticsearch.compute.data.IntBlock;
+import org.elasticsearch.compute.data.LongBlock;
+import org.openjdk.jmh.annotations.Benchmark;
+import org.openjdk.jmh.annotations.BenchmarkMode;
+import org.openjdk.jmh.annotations.Fork;
+import org.openjdk.jmh.annotations.Level;
+import org.openjdk.jmh.annotations.Measurement;
+import org.openjdk.jmh.annotations.Mode;
+import org.openjdk.jmh.annotations.OperationsPerInvocation;
+import org.openjdk.jmh.annotations.OutputTimeUnit;
+import org.openjdk.jmh.annotations.Param;
+import org.openjdk.jmh.annotations.Scope;
+import org.openjdk.jmh.annotations.Setup;
+import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.annotations.TearDown;
+import org.openjdk.jmh.annotations.Warmup;
+
+import java.util.concurrent.TimeUnit;
+
+@Warmup(iterations = 5)
+@Measurement(iterations = 7)
+@BenchmarkMode(Mode.AverageTime)
+@OutputTimeUnit(TimeUnit.NANOSECONDS)
+@State(Scope.Thread)
+@Fork(1)
+public class BlockKeepMaskBenchmark extends BlockBenchmark {
+ static {
+ // Smoke test all the expected values and force loading subclasses more like prod
+ int totalPositions = 10;
+ for (String paramString : RELEVANT_TYPE_BLOCK_COMBINATIONS) {
+ String[] params = paramString.split("/");
+ String dataType = params[0];
+ String blockKind = params[1];
+ BooleanVector mask = buildMask(totalPositions);
+
+ BenchmarkBlocks data = buildBenchmarkBlocks(dataType, blockKind, mask, totalPositions);
+ Block[] results = new Block[NUM_BLOCKS_PER_ITERATION];
+ run(data, mask, results);
+ assertCheckSums(dataType, blockKind, data, results, totalPositions);
+ }
+ }
+
+ record BenchmarkBlocks(Block[] blocks, long[] checkSums) {};
+
+ static BenchmarkBlocks buildBenchmarkBlocks(String dataType, String blockKind, BooleanVector mask, int totalPositions) {
+ Block[] blocks = BlockBenchmark.buildBlocks(dataType, blockKind, totalPositions);
+ return new BenchmarkBlocks(blocks, checksumsFor(dataType, blocks, mask));
+ }
+
+ static long[] checksumsFor(String dataType, Block[] blocks, BooleanVector mask) {
+ long[] checkSums = new long[NUM_BLOCKS_PER_ITERATION];
+ switch (dataType) {
+ case "boolean" -> {
+ for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) {
+ BooleanBlock block = (BooleanBlock) blocks[blockIndex];
+ checkSums[blockIndex] = computeBooleanCheckSum(block, mask);
+ }
+ }
+ case "BytesRef" -> {
+ for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) {
+ BytesRefBlock block = (BytesRefBlock) blocks[blockIndex];
+ checkSums[blockIndex] = computeBytesRefCheckSum(block, mask);
+ }
+ }
+ case "double" -> {
+ for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) {
+ DoubleBlock block = (DoubleBlock) blocks[blockIndex];
+ checkSums[blockIndex] = computeDoubleCheckSum(block, mask);
+ }
+ }
+ case "int" -> {
+ for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) {
+ IntBlock block = (IntBlock) blocks[blockIndex];
+ checkSums[blockIndex] = computeIntCheckSum(block, mask);
+ }
+ }
+ case "long" -> {
+ for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) {
+ LongBlock block = (LongBlock) blocks[blockIndex];
+ checkSums[blockIndex] = computeLongCheckSum(block, mask);
+ }
+ }
+ // TODO float
+ default -> throw new IllegalStateException("illegal data type [" + dataType + "]");
+ }
+ return checkSums;
+ }
+
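+    // Alternating mask: keeps even positions and drops odd ones, so roughly half the values survive keepMask.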
+ static BooleanVector buildMask(int totalPositions) {
+ try (BooleanVector.FixedBuilder builder = blockFactory.newBooleanVectorFixedBuilder(totalPositions)) {
+ for (int p = 0; p < totalPositions; p++) {
+ builder.appendBoolean(p % 2 == 0);
+ }
+ return builder.build();
+ }
+ }
+
+ private static void run(BenchmarkBlocks data, BooleanVector mask, Block[] results) {
+ for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) {
+ results[blockIndex] = data.blocks[blockIndex].keepMask(mask);
+ }
+ }
+
+ private static void assertCheckSums(String dataType, String blockKind, BenchmarkBlocks data, Block[] results, int positionCount) {
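+        // Checksum the masked results with an all-true mask; they must equal the checksums computed through the original mask.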
+ long[] checkSums = checksumsFor(dataType, results, blockFactory.newConstantBooleanVector(true, positionCount));
+ for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) {
+ if (checkSums[blockIndex] != data.checkSums[blockIndex]) {
+ throw new AssertionError(
+ "checksums do not match for block ["
+ + blockIndex
+ + "]["
+ + dataType
+ + "]["
+ + blockKind
+ + "]: "
+ + checkSums[blockIndex]
+ + " vs "
+ + data.checkSums[blockIndex]
+ );
+ }
+ }
+ }
+
+ private static long computeBooleanCheckSum(BooleanBlock block, BooleanVector mask) {
+ long sum = 0;
+
+ for (int p = 0; p < block.getPositionCount(); p++) {
+ if (block.isNull(p) || mask.getBoolean(p) == false) {
+ continue;
+ }
+ int start = block.getFirstValueIndex(p);
+ int end = start + block.getValueCount(p);
+ for (int i = start; i < end; i++) {
+ sum += block.getBoolean(i) ? 1 : 0;
+ }
+ }
+
+ return sum;
+ }
+
+ private static long computeBytesRefCheckSum(BytesRefBlock block, BooleanVector mask) {
+ long sum = 0;
+ BytesRef scratch = new BytesRef();
+
+ for (int p = 0; p < block.getPositionCount(); p++) {
+ if (block.isNull(p) || mask.getBoolean(p) == false) {
+ continue;
+ }
+ int start = block.getFirstValueIndex(p);
+ int end = start + block.getValueCount(p);
+ for (int i = start; i < end; i++) {
+ BytesRef v = block.getBytesRef(i, scratch);
+ sum += v.length > 0 ? v.bytes[v.offset] : 0;
+ }
+ }
+
+ return sum;
+ }
+
+ private static long computeDoubleCheckSum(DoubleBlock block, BooleanVector mask) {
+ long sum = 0;
+
+ for (int p = 0; p < block.getPositionCount(); p++) {
+ if (block.isNull(p) || mask.getBoolean(p) == false) {
+ continue;
+ }
+ int start = block.getFirstValueIndex(p);
+ int end = start + block.getValueCount(p);
+ for (int i = start; i < end; i++) {
+ sum += (long) block.getDouble(i);
+ }
+ }
+
+ return sum;
+ }
+
+ private static long computeIntCheckSum(IntBlock block, BooleanVector mask) {
+ int sum = 0;
+
+ for (int p = 0; p < block.getPositionCount(); p++) {
+ if (block.isNull(p) || mask.getBoolean(p) == false) {
+ continue;
+ }
+ int start = block.getFirstValueIndex(p);
+ int end = start + block.getValueCount(p);
+ for (int i = start; i < end; i++) {
+ sum += block.getInt(i);
+ }
+ }
+
+ return sum;
+ }
+
+ private static long computeLongCheckSum(LongBlock block, BooleanVector mask) {
+ long sum = 0;
+
+ for (int p = 0; p < block.getPositionCount(); p++) {
+ if (block.isNull(p) || mask.getBoolean(p) == false) {
+ continue;
+ }
+ int start = block.getFirstValueIndex(p);
+ int end = start + block.getValueCount(p);
+ for (int i = start; i < end; i++) {
+ sum += block.getLong(i);
+ }
+ }
+
+ return sum;
+ }
+
+ /**
+ * Must be a subset of {@link BlockBenchmark#RELEVANT_TYPE_BLOCK_COMBINATIONS}
+ */
+ @Param(
+ {
+ "boolean/array",
+ "boolean/array-multivalue-null",
+ "boolean/big-array",
+ "boolean/big-array-multivalue-null",
+ "boolean/vector",
+ "boolean/vector-big-array",
+ "boolean/vector-const",
+ "BytesRef/array",
+ "BytesRef/array-multivalue-null",
+ "BytesRef/vector",
+ "BytesRef/vector-const",
+ "double/array",
+ "double/array-multivalue-null",
+ "double/big-array",
+ "double/big-array-multivalue-null",
+ "double/vector",
+ "double/vector-big-array",
+ "double/vector-const",
+ "int/array",
+ "int/array-multivalue-null",
+ "int/big-array",
+ "int/big-array-multivalue-null",
+ "int/vector",
+ "int/vector-big-array",
+ "int/vector-const",
+ "long/array",
+ "long/array-multivalue-null",
+ "long/big-array",
+ "long/big-array-multivalue-null",
+ "long/vector",
+ "long/vector-big-array",
+ "long/vector-const" }
+ )
+ public String dataTypeAndBlockKind;
+
+ private BenchmarkBlocks data;
+
+ private final BooleanVector mask = buildMask(BLOCK_TOTAL_POSITIONS);
+
+ private final Block[] results = new Block[NUM_BLOCKS_PER_ITERATION];
+
+ @Setup
+ public void setup() {
+ String[] params = dataTypeAndBlockKind.split("/");
+ String dataType = params[0];
+ String blockKind = params[1];
+
+ data = buildBenchmarkBlocks(dataType, blockKind, mask, BLOCK_TOTAL_POSITIONS);
+ }
+
+ @Benchmark
+ @OperationsPerInvocation(NUM_BLOCKS_PER_ITERATION * BLOCK_TOTAL_POSITIONS)
+ public void run() {
+ run(data, mask, results);
+ }
+
+ @TearDown(Level.Iteration)
+ public void assertCheckSums() {
+ String[] params = dataTypeAndBlockKind.split("/");
+ String dataType = params[0];
+ String blockKind = params[1];
+ assertCheckSums(dataType, blockKind, data, results, BLOCK_TOTAL_POSITIONS);
+ }
+}
diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/BlockReadBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/BlockReadBenchmark.java
new file mode 100644
index 0000000000000..327dcfcff3a28
--- /dev/null
+++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/BlockReadBenchmark.java
@@ -0,0 +1,319 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.benchmark.compute.operator;
+
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.util.*;
+import org.elasticsearch.compute.data.*;
+import org.openjdk.jmh.annotations.*;
+
+import java.util.*;
+import java.util.concurrent.TimeUnit;
+import java.util.stream.IntStream;
+
+@Warmup(iterations = 5)
+@Measurement(iterations = 7)
+@BenchmarkMode(Mode.AverageTime)
+@OutputTimeUnit(TimeUnit.NANOSECONDS)
+@State(Scope.Thread)
+@Fork(1)
+public class BlockReadBenchmark extends BlockBenchmark {
+ static {
+ // Smoke test all the expected values and force loading subclasses more like prod
+ int totalPositions = 10;
+ long[] actualCheckSums = new long[NUM_BLOCKS_PER_ITERATION];
+
+ for (String paramString : RELEVANT_TYPE_BLOCK_COMBINATIONS) {
+ String[] params = paramString.split("/");
+ String dataType = params[0];
+ String blockKind = params[1];
+
+ BenchmarkBlocks data = buildBenchmarkBlocks(dataType, blockKind, totalPositions);
+ int[][] traversalOrders = createTraversalOrders(data.blocks(), false);
+ run(dataType, data, traversalOrders, actualCheckSums);
+ assertCheckSums(data, actualCheckSums);
+ }
+ }
+
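+    // Builds one position order per block: sequential by default, shuffled when the "random" access pattern is requested.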
+ private static int[][] createTraversalOrders(Block[] blocks, boolean randomized) {
+ int[][] orders = new int[blocks.length][];
+
+ for (int i = 0; i < blocks.length; i++) {
+ IntStream positionsStream = IntStream.range(0, blocks[i].getPositionCount());
+
+ if (randomized) {
+                List<Integer> positions = new ArrayList<>(positionsStream.boxed().toList());
+ Collections.shuffle(positions, random);
+ orders[i] = positions.stream().mapToInt(x -> x).toArray();
+ } else {
+ orders[i] = positionsStream.toArray();
+ }
+ }
+
+ return orders;
+ }
+
+ record BenchmarkBlocks(Block[] blocks, long[] checkSums) {};
+
+ static BenchmarkBlocks buildBenchmarkBlocks(String dataType, String blockKind, int totalPositions) {
+ Block[] blocks = BlockBenchmark.buildBlocks(dataType, blockKind, totalPositions);
+ long[] checkSums = new long[NUM_BLOCKS_PER_ITERATION];
+ switch (dataType) {
+ case "boolean" -> {
+ for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) {
+ BooleanBlock block = (BooleanBlock) blocks[blockIndex];
+ checkSums[blockIndex] = computeBooleanCheckSum(block, IntStream.range(0, block.getPositionCount()).toArray());
+ }
+ }
+ case "BytesRef" -> {
+ for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) {
+ BytesRefBlock block = (BytesRefBlock) blocks[blockIndex];
+ checkSums[blockIndex] = computeBytesRefCheckSum(block, IntStream.range(0, block.getPositionCount()).toArray());
+ }
+ }
+ case "double" -> {
+ for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) {
+ DoubleBlock block = (DoubleBlock) blocks[blockIndex];
+ checkSums[blockIndex] = computeDoubleCheckSum(block, IntStream.range(0, block.getPositionCount()).toArray());
+ }
+ }
+ case "int" -> {
+ for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) {
+ IntBlock block = (IntBlock) blocks[blockIndex];
+ checkSums[blockIndex] = computeIntCheckSum(block, IntStream.range(0, block.getPositionCount()).toArray());
+ }
+ }
+ case "long" -> {
+ for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) {
+ LongBlock block = (LongBlock) blocks[blockIndex];
+ checkSums[blockIndex] = computeLongCheckSum(block, IntStream.range(0, block.getPositionCount()).toArray());
+ }
+ }
+ // TODO float
+ default -> throw new IllegalStateException("illegal data type [" + dataType + "]");
+ }
+ return new BenchmarkBlocks(blocks, checkSums);
+ }
+
+ private static void run(String dataType, BenchmarkBlocks data, int[][] traversalOrders, long[] resultCheckSums) {
+ switch (dataType) {
+ case "boolean" -> {
+ for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) {
+ BooleanBlock block = (BooleanBlock) data.blocks[blockIndex];
+
+ resultCheckSums[blockIndex] = computeBooleanCheckSum(block, traversalOrders[blockIndex]);
+ }
+ }
+ case "BytesRef" -> {
+ for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) {
+ BytesRefBlock block = (BytesRefBlock) data.blocks[blockIndex];
+
+ resultCheckSums[blockIndex] = computeBytesRefCheckSum(block, traversalOrders[blockIndex]);
+ }
+ }
+ case "double" -> {
+ for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) {
+ DoubleBlock block = (DoubleBlock) data.blocks[blockIndex];
+
+ resultCheckSums[blockIndex] = computeDoubleCheckSum(block, traversalOrders[blockIndex]);
+ }
+ }
+ case "int" -> {
+ for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) {
+ IntBlock block = (IntBlock) data.blocks[blockIndex];
+
+ resultCheckSums[blockIndex] = computeIntCheckSum(block, traversalOrders[blockIndex]);
+ }
+ }
+ case "long" -> {
+ for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) {
+ LongBlock block = (LongBlock) data.blocks[blockIndex];
+
+ resultCheckSums[blockIndex] = computeLongCheckSum(block, traversalOrders[blockIndex]);
+ }
+ }
+ default -> {
+ throw new IllegalStateException();
+ }
+ }
+ }
+
+ private static void assertCheckSums(BenchmarkBlocks data, long[] actualCheckSums) {
+ for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) {
+ if (actualCheckSums[blockIndex] != data.checkSums[blockIndex]) {
+ throw new AssertionError("checksums do not match for block [" + blockIndex + "]");
+ }
+ }
+ }
+
+ private static long computeBooleanCheckSum(BooleanBlock block, int[] traversalOrder) {
+ long sum = 0;
+
+ for (int position : traversalOrder) {
+ if (block.isNull(position)) {
+ continue;
+ }
+ int start = block.getFirstValueIndex(position);
+ int end = start + block.getValueCount(position);
+ for (int i = start; i < end; i++) {
+ sum += block.getBoolean(i) ? 1 : 0;
+ }
+ }
+
+ return sum;
+ }
+
+ private static long computeBytesRefCheckSum(BytesRefBlock block, int[] traversalOrder) {
+ long sum = 0;
+ BytesRef scratch = new BytesRef();
+
+ for (int position : traversalOrder) {
+ if (block.isNull(position)) {
+ continue;
+ }
+ int start = block.getFirstValueIndex(position);
+ int end = start + block.getValueCount(position);
+ for (int i = start; i < end; i++) {
+ BytesRef v = block.getBytesRef(i, scratch);
+ sum += v.length > 0 ? v.bytes[v.offset] : 0;
+ }
+ }
+
+ return sum;
+ }
+
+ private static long computeDoubleCheckSum(DoubleBlock block, int[] traversalOrder) {
+ long sum = 0;
+
+ for (int position : traversalOrder) {
+ if (block.isNull(position)) {
+ continue;
+ }
+ int start = block.getFirstValueIndex(position);
+ int end = start + block.getValueCount(position);
+ for (int i = start; i < end; i++) {
+ // Use an operation that is not affected by rounding errors. Otherwise, the result may depend on the traversalOrder.
+ sum += (long) block.getDouble(i);
+ }
+ }
+
+ return sum;
+ }
+
+ private static long computeIntCheckSum(IntBlock block, int[] traversalOrder) {
+ int sum = 0;
+
+ for (int position : traversalOrder) {
+ if (block.isNull(position)) {
+ continue;
+ }
+ int start = block.getFirstValueIndex(position);
+ int end = start + block.getValueCount(position);
+ for (int i = start; i < end; i++) {
+ sum += block.getInt(i);
+ }
+ }
+
+ return sum;
+ }
+
+ private static long computeLongCheckSum(LongBlock block, int[] traversalOrder) {
+ long sum = 0;
+
+ for (int position : traversalOrder) {
+ if (block.isNull(position)) {
+ continue;
+ }
+ int start = block.getFirstValueIndex(position);
+ int end = start + block.getValueCount(position);
+ for (int i = start; i < end; i++) {
+ sum += block.getLong(i);
+ }
+ }
+
+ return sum;
+ }
+
+ private static boolean isRandom(String accessType) {
+ return accessType.equalsIgnoreCase("random");
+ }
+
+ /**
+ * Must be a subset of {@link BlockBenchmark#RELEVANT_TYPE_BLOCK_COMBINATIONS}
+ */
+ @Param(
+ {
+ "boolean/array",
+ "boolean/array-multivalue-null",
+ "boolean/big-array",
+ "boolean/big-array-multivalue-null",
+ "boolean/vector",
+ "boolean/vector-big-array",
+ "boolean/vector-const",
+ "BytesRef/array",
+ "BytesRef/array-multivalue-null",
+ "BytesRef/vector",
+ "BytesRef/vector-const",
+ "double/array",
+ "double/array-multivalue-null",
+ "double/big-array",
+ "double/big-array-multivalue-null",
+ "double/vector",
+ "double/vector-big-array",
+ "double/vector-const",
+ "int/array",
+ "int/array-multivalue-null",
+ "int/big-array",
+ "int/big-array-multivalue-null",
+ "int/vector",
+ "int/vector-big-array",
+ "int/vector-const",
+ "long/array",
+ "long/array-multivalue-null",
+ "long/big-array",
+ "long/big-array-multivalue-null",
+ "long/vector",
+ "long/vector-big-array",
+ "long/vector-const" }
+ )
+ public String dataTypeAndBlockKind;
+
+ @Param({ "sequential", "random" })
+ public String accessType;
+
+ private BenchmarkBlocks data;
+
+ private int[][] traversalOrders;
+
+ private final long[] actualCheckSums = new long[NUM_BLOCKS_PER_ITERATION];
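+ // recomputed on every invocation and compared against the expected checksums in the iteration
+ // tear-down, so the JIT cannot discard the block reads as dead code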
+
+ @Setup
+ public void setup() {
+ String[] params = dataTypeAndBlockKind.split("/");
+ String dataType = params[0];
+ String blockKind = params[1];
+
+ data = buildBenchmarkBlocks(dataType, blockKind, BLOCK_TOTAL_POSITIONS);
+ traversalOrders = createTraversalOrders(data.blocks(), isRandom(accessType));
+ }
+
+ @Benchmark
+ @OperationsPerInvocation(NUM_BLOCKS_PER_ITERATION * BLOCK_TOTAL_POSITIONS)
+ public void run() {
+ String[] params = dataTypeAndBlockKind.split("/");
+ String dataType = params[0];
+
+ run(dataType, data, traversalOrders, actualCheckSums);
+ }
+
+ @TearDown(Level.Iteration)
+ public void assertCheckSums() {
+ assertCheckSums(data, actualCheckSums);
+ }
+}
diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/h3/H3Benchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/h3/H3Benchmark.java
new file mode 100644
index 0000000000000..2441acab7d405
--- /dev/null
+++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/h3/H3Benchmark.java
@@ -0,0 +1,46 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+package org.elasticsearch.benchmark.h3;
+
+import org.elasticsearch.h3.H3;
+import org.openjdk.jmh.Main;
+import org.openjdk.jmh.annotations.Benchmark;
+import org.openjdk.jmh.annotations.Fork;
+import org.openjdk.jmh.annotations.Measurement;
+import org.openjdk.jmh.annotations.OutputTimeUnit;
+import org.openjdk.jmh.annotations.Warmup;
+import org.openjdk.jmh.infra.Blackhole;
+
+import java.util.concurrent.TimeUnit;
+
+@OutputTimeUnit(TimeUnit.SECONDS)
+@Warmup(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS)
+@Measurement(iterations = 25, time = 1, timeUnit = TimeUnit.SECONDS)
+@Fork(1)
+public class H3Benchmark {
+
+ @Benchmark
+ public void pointToH3(H3State state, Blackhole bh) {
+ for (int i = 0; i < state.points.length; i++) {
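+ // H3 defines 16 resolutions, 0 (coarsest) through 15 (finest)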
+ for (int res = 0; res <= 15; res++) {
+ bh.consume(H3.geoToH3(state.points[i][0], state.points[i][1], res));
+ }
+ }
+ }
+
+ @Benchmark
+ public void h3Boundary(H3State state, Blackhole bh) {
+ for (int i = 0; i < state.h3.length; i++) {
+ bh.consume(H3.h3ToGeoBoundary(state.h3[i]));
+ }
+ }
+
+ public static void main(String[] args) throws Exception {
+ Main.main(args);
+ }
+}
diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/h3/H3State.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/h3/H3State.java
new file mode 100644
index 0000000000000..5707e692a0750
--- /dev/null
+++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/h3/H3State.java
@@ -0,0 +1,35 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+package org.elasticsearch.benchmark.h3;
+
+import org.elasticsearch.h3.H3;
+import org.openjdk.jmh.annotations.Level;
+import org.openjdk.jmh.annotations.Scope;
+import org.openjdk.jmh.annotations.Setup;
+import org.openjdk.jmh.annotations.State;
+
+import java.io.IOException;
+import java.util.Random;
+
+@State(Scope.Benchmark)
+public class H3State {
+
+ double[][] points = new double[1000][2];
+ long[] h3 = new long[1000];
+
+ @Setup(Level.Trial)
+ public void setupTrial() throws IOException {
+ Random random = new Random(1234);
+ for (int i = 0; i < points.length; i++) {
+ points[i][0] = random.nextDouble() * 180 - 90; // lat
+ points[i][1] = random.nextDouble() * 360 - 180; // lon
+ int res = random.nextInt(16); // resolution
+ h3[i] = H3.geoToH3(points[i][0], points[i][1], res);
+ }
+ }
+}
diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/ShardsAvailabilityHealthIndicatorBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/ShardsAvailabilityHealthIndicatorBenchmark.java
index 8c5de05a01648..d7a72615f4b93 100644
--- a/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/ShardsAvailabilityHealthIndicatorBenchmark.java
+++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/ShardsAvailabilityHealthIndicatorBenchmark.java
@@ -32,6 +32,7 @@
import org.elasticsearch.indices.SystemIndices;
import org.elasticsearch.tasks.TaskManager;
import org.elasticsearch.telemetry.metric.MeterRegistry;
+import org.elasticsearch.threadpool.DefaultBuiltInExecutorBuilders;
import org.elasticsearch.threadpool.ThreadPool;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
@@ -167,7 +168,7 @@ public void setUp() throws Exception {
.build();
Settings settings = Settings.builder().put("node.name", ShardsAvailabilityHealthIndicatorBenchmark.class.getSimpleName()).build();
- ThreadPool threadPool = new ThreadPool(settings, MeterRegistry.NOOP);
+ ThreadPool threadPool = new ThreadPool(settings, MeterRegistry.NOOP, new DefaultBuiltInExecutorBuilders());
ClusterService clusterService = new ClusterService(
Settings.EMPTY,
diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/script/ScriptScoreBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/script/ScriptScoreBenchmark.java
index 5a27abe8be2a4..fe221ec980dc3 100644
--- a/benchmarks/src/main/java/org/elasticsearch/benchmark/script/ScriptScoreBenchmark.java
+++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/script/ScriptScoreBenchmark.java
@@ -186,6 +186,11 @@ public void setDocument(int docid) {
public boolean needs_score() {
return false;
}
+
+ @Override
+ public boolean needs_termStats() {
+ return false;
+ }
};
};
}
diff --git a/branches.json b/branches.json
index b852cd1fa5dbd..1d860501cbc87 100644
--- a/branches.json
+++ b/branches.json
@@ -7,9 +7,6 @@
{
"branch": "8.15"
},
- {
- "branch": "8.14"
- },
{
"branch": "7.17"
}
diff --git a/build-tools-internal/gradle/wrapper/gradle-wrapper.properties b/build-tools-internal/gradle/wrapper/gradle-wrapper.properties
index efe2ff3449216..9036682bf0f0c 100644
--- a/build-tools-internal/gradle/wrapper/gradle-wrapper.properties
+++ b/build-tools-internal/gradle/wrapper/gradle-wrapper.properties
@@ -1,7 +1,7 @@
distributionBase=GRADLE_USER_HOME
distributionPath=wrapper/dists
-distributionSha256Sum=258e722ec21e955201e31447b0aed14201765a3bfbae296a46cf60b70e66db70
-distributionUrl=https\://services.gradle.org/distributions/gradle-8.9-all.zip
+distributionSha256Sum=682b4df7fe5accdca84a4d1ef6a3a6ab096b3efd5edf7de2bd8c758d95a93703
+distributionUrl=https\://services.gradle.org/distributions/gradle-8.10-all.zip
networkTimeout=10000
validateDistributionUrl=true
zipStoreBase=GRADLE_USER_HOME
diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/fixtures/AbstractGradleInternalPluginFuncTest.groovy b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/fixtures/AbstractGradleInternalPluginFuncTest.groovy
index 172bf9115d152..a89a26d2800d4 100644
--- a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/fixtures/AbstractGradleInternalPluginFuncTest.groovy
+++ b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/fixtures/AbstractGradleInternalPluginFuncTest.groovy
@@ -15,17 +15,23 @@ abstract class AbstractGradleInternalPluginFuncTest extends AbstractJavaGradleFu
abstract Class getPluginClassUnderTest();
def setup() {
+ settingsFile.text = """
+ plugins {
+ id 'elasticsearch.java-toolchain'
+ }
+ """ + settingsFile.text
+
buildFile << """
import ${getPluginClassUnderTest().getName()}
-
+
plugins {
// bring in build-tools-internal onto the classpath
id 'elasticsearch.global-build-info'
}
// internally used plugins do not have a plugin id as they are
- // not intended to be used directly from build scripts
+ // not intended to be used directly from build scripts
plugins.apply(${getPluginClassUnderTest().getSimpleName()})
-
+
"""
}
}
diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/ElasticsearchJavadocPluginFuncTest.groovy b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/ElasticsearchJavadocPluginFuncTest.groovy
index d74dce6924e32..c63eca8680179 100644
--- a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/ElasticsearchJavadocPluginFuncTest.groovy
+++ b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/ElasticsearchJavadocPluginFuncTest.groovy
@@ -19,22 +19,22 @@ class ElasticsearchJavadocPluginFuncTest extends AbstractGradleFuncTest {
given:
someLibProject()
subProject("some-depending-lib") {
- buildFile << """
+ buildFile << """
plugins {
id 'elasticsearch.java-doc'
id 'java'
}
group = 'org.acme.depending'
-
+
dependencies {
implementation project(':some-lib')
}
"""
classFile('org.acme.depending.SomeDepending') << """
package org.acme.depending;
-
+
import org.acme.Something;
-
+
public class SomeDepending {
public Something createSomething() {
return new Something();
@@ -66,16 +66,17 @@ class ElasticsearchJavadocPluginFuncTest extends AbstractGradleFuncTest {
def "sources of shadowed dependencies are added to projects javadoc"() {
given:
+ settingsFile.text = ""
someLibProject() << """version = 1.0"""
subProject("some-depending-lib") {
- buildFile << """
+ buildFile << """
plugins {
id 'elasticsearch.java-doc'
id 'com.github.johnrengelman.shadow' version '7.1.2'
id 'java'
}
group = 'org.acme.depending'
-
+
dependencies {
implementation project(':some-lib')
shadow project(':some-shadowed-lib')
@@ -83,9 +84,9 @@ class ElasticsearchJavadocPluginFuncTest extends AbstractGradleFuncTest {
"""
classFile('org.acme.depending.SomeDepending') << """
package org.acme.depending;
-
+
import org.acme.Something;
-
+
public class SomeDepending {
public Something createSomething() {
return new Something();
@@ -94,9 +95,9 @@ class ElasticsearchJavadocPluginFuncTest extends AbstractGradleFuncTest {
"""
classFile('org.acme.depending.SomeShadowedDepending') << """
package org.acme.depending;
-
+
import org.acme.shadowed.Shadowed;
-
+
public class SomeShadowedDepending {
public Shadowed createShadowed() {
return new Shadowed();
@@ -114,7 +115,7 @@ class ElasticsearchJavadocPluginFuncTest extends AbstractGradleFuncTest {
"""
classFile('org.acme.shadowed.Shadowed') << """
package org.acme.shadowed;
-
+
public class Shadowed {
}
"""
@@ -145,22 +146,22 @@ class ElasticsearchJavadocPluginFuncTest extends AbstractGradleFuncTest {
tasks.named("javadoc").configure { enabled = false }
"""
subProject("some-depending-lib") {
- buildFile << """
+ buildFile << """
plugins {
id 'elasticsearch.java-doc'
id 'java'
}
group = 'org.acme.depending'
-
+
dependencies {
implementation project(':some-lib')
}
"""
classFile('org.acme.depending.SomeDepending') << """
package org.acme.depending;
-
+
import org.acme.Something;
-
+
public class SomeDepending {
public Something createSomething() {
return new Something();
@@ -264,7 +265,7 @@ class ElasticsearchJavadocPluginFuncTest extends AbstractGradleFuncTest {
classFile('org.acme.Something') << """
package org.acme;
-
+
public class Something {
}
"""
diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditTaskFuncTest.groovy b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditTaskFuncTest.groovy
index b365624b5749a..c6a4572cb8a86 100644
--- a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditTaskFuncTest.groovy
+++ b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditTaskFuncTest.groovy
@@ -18,6 +18,7 @@ import org.elasticsearch.gradle.fixtures.AbstractGradleFuncTest
import org.elasticsearch.gradle.fixtures.AbstractGradleInternalPluginFuncTest
import org.elasticsearch.gradle.internal.conventions.precommit.LicenseHeadersPrecommitPlugin
import org.elasticsearch.gradle.internal.conventions.precommit.PrecommitPlugin
+import org.gradle.testkit.runner.GradleRunner
import org.gradle.testkit.runner.TaskOutcome
@@ -211,6 +212,10 @@ class ThirdPartyAuditTaskFuncTest extends AbstractGradleInternalPluginFuncTest {
loggingDynamicType.toJar(targetFile(dir("${baseGroupFolderPath}/broken-log4j/0.0.1/"), "broken-log4j-0.0.1.jar"))
}
+ GradleRunner gradleRunner(Object... arguments) {
+ return super.gradleRunner(arguments).withEnvironment([RUNTIME_JAVA_HOME: System.getProperty("java.home")])
+ }
+
static File targetFile(File dir, String fileName) {
new File(dir, fileName)
}
diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/LegacyYamlRestTestPluginFuncTest.groovy b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/LegacyYamlRestTestPluginFuncTest.groovy
index 94fa329af1715..5e96fa524268a 100644
--- a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/LegacyYamlRestTestPluginFuncTest.groovy
+++ b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/LegacyYamlRestTestPluginFuncTest.groovy
@@ -12,6 +12,7 @@ import spock.lang.IgnoreIf
import org.elasticsearch.gradle.VersionProperties
import org.elasticsearch.gradle.fixtures.AbstractRestResourcesFuncTest
+import org.gradle.testkit.runner.GradleRunner
import org.gradle.testkit.runner.TaskOutcome
@IgnoreIf({ os.isWindows() })
@@ -205,4 +206,8 @@ echo "Running elasticsearch \$0"
}
"""
}
+
+ GradleRunner gradleRunner(Object... arguments) {
+ return super.gradleRunner(arguments).withEnvironment([RUNTIME_JAVA_HOME: System.getProperty("java.home")])
+ }
}
diff --git a/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle b/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle
index 6cb22dad9bc79..285c3a61b08c2 100644
--- a/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle
+++ b/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle
@@ -168,8 +168,7 @@ if (providers.systemProperty('idea.active').getOrNull() == 'true') {
'-ea',
'-Djava.security.manager=allow',
'-Djava.locale.providers=SPI,COMPAT',
- '-Djava.library.path=' + testLibraryPath,
- '-Djna.library.path=' + testLibraryPath,
+ '-Des.nativelibs.path=' + testLibraryPath,
// TODO: only open these for mockito when it is modularized
'--add-opens=java.base/java.security.cert=ALL-UNNAMED',
'--add-opens=java.base/java.nio.channels=ALL-UNNAMED',
diff --git a/build-tools-internal/src/main/groovy/elasticsearch.runtime-jdk-provision.gradle b/build-tools-internal/src/main/groovy/elasticsearch.runtime-jdk-provision.gradle
index f85ceed18604b..3400be77a588d 100644
--- a/build-tools-internal/src/main/groovy/elasticsearch.runtime-jdk-provision.gradle
+++ b/build-tools-internal/src/main/groovy/elasticsearch.runtime-jdk-provision.gradle
@@ -25,7 +25,6 @@ configure(allprojects) {
JvmVendorSpec.ORACLE :
JvmVendorSpec.matching(VersionProperties.bundledJdkVendor)
}
-
project.tasks.withType(Test).configureEach { Test test ->
if (BuildParams.getIsRuntimeJavaHomeSet()) {
test.executable = "${BuildParams.runtimeJavaHome}/bin/java" +
@@ -47,12 +46,4 @@ configure(allprojects) {
}
}
}
- project.plugins.withType(ThirdPartyAuditPrecommitPlugin) {
- project.getTasks().withType(ThirdPartyAuditTask.class).configureEach {
- if (BuildParams.getIsRuntimeJavaHomeSet() == false) {
- javaHome.set(launcher.map { it.metadata.installationPath.asFile.path })
- targetCompatibility.set(providers.provider(() -> JavaVersion.toVersion(launcher.map { it.metadata.javaRuntimeVersion }.get())))
- }
- }
- }
}
diff --git a/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/AntFixtureStop.groovy b/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/AntFixtureStop.groovy
index e454d2ee38fff..658e2623cbbd7 100644
--- a/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/AntFixtureStop.groovy
+++ b/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/AntFixtureStop.groovy
@@ -13,6 +13,7 @@ import org.elasticsearch.gradle.OS
import org.elasticsearch.gradle.internal.test.AntFixture
import org.gradle.api.file.FileSystemOperations
import org.gradle.api.file.ProjectLayout
+import org.gradle.api.provider.ProviderFactory
import org.gradle.api.tasks.Internal
import org.gradle.process.ExecOperations
@@ -24,14 +25,17 @@ abstract class AntFixtureStop extends LoggedExec implements FixtureStop {
AntFixture fixture
@Inject
- AntFixtureStop(ProjectLayout projectLayout, ExecOperations execOperations, FileSystemOperations fileSystemOperations) {
- super(projectLayout, execOperations, fileSystemOperations)
+ AntFixtureStop(ProjectLayout projectLayout,
+ ExecOperations execOperations,
+ FileSystemOperations fileSystemOperations,
+ ProviderFactory providerFactory) {
+ super(projectLayout, execOperations, fileSystemOperations, providerFactory)
}
void setFixture(AntFixture fixture) {
assert this.fixture == null
this.fixture = fixture;
- final Object pid = "${ -> this.fixture.pid }"
+ final Object pid = "${-> this.fixture.pid}"
onlyIf("pidFile exists") { fixture.pidFile.exists() }
doFirst {
logger.info("Shutting down ${fixture.name} with pid ${pid}")
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchBuildCompletePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchBuildCompletePlugin.java
index b513fd7b93631..b27f480df4e63 100644
--- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchBuildCompletePlugin.java
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchBuildCompletePlugin.java
@@ -61,7 +61,7 @@ public void apply(Project target) {
: System.getenv("BUILDKITE_BUILD_NUMBER");
String performanceTest = System.getenv("BUILD_PERFORMANCE_TEST");
if (buildNumber != null && performanceTest == null && GradleUtils.isIncludedBuild(target) == false) {
- File targetFile = target.file("build/" + buildNumber + ".tar.bz2");
+ File targetFile = calculateTargetFile(target, buildNumber);
File projectDir = target.getProjectDir();
File gradleWorkersDir = new File(target.getGradle().getGradleUserHomeDir(), "workers/");
DevelocityConfiguration extension = target.getExtensions().getByType(DevelocityConfiguration.class);
@@ -86,9 +86,19 @@ public void apply(Project target) {
}
}
+ private File calculateTargetFile(Project target, String buildNumber) {
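+ // pick a name that does not collide with an artifact from an earlier run in the same workspace,
+ // appending -1, -2, ... until an unused file name is found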
+ File uploadFile = target.file("build/" + buildNumber + ".tar.bz2");
+ int artifactIndex = 1;
+ while (uploadFile.exists()) {
+ uploadFile = target.file("build/" + buildNumber + "-" + artifactIndex++ + ".tar.bz2");
+ }
+ return uploadFile;
+ }
+
private List<File> resolveProjectLogs(File projectDir) {
var projectDirFiles = getFileOperations().fileTree(projectDir);
projectDirFiles.include("**/*.hprof");
+ projectDirFiles.include("**/build/reports/configuration-cache/**");
projectDirFiles.include("**/build/test-results/**/*.xml");
projectDirFiles.include("**/build/testclusters/**");
projectDirFiles.include("**/build/testrun/*/temp/**");
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaBasePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaBasePlugin.java
index f95d9d72a473f..a3b1dd9731591 100644
--- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaBasePlugin.java
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaBasePlugin.java
@@ -189,9 +189,7 @@ private static void configureNativeLibraryPath(Project project) {
var libraryPath = (Supplier) () -> TestUtil.getTestLibraryPath(nativeConfigFiles.getAsPath());
test.dependsOn(nativeConfigFiles);
- // we may use JNA or the JDK's foreign function api to load libraries, so we set both sysprops
- systemProperties.systemProperty("java.library.path", libraryPath);
- systemProperties.systemProperty("jna.library.path", libraryPath);
+ systemProperties.systemProperty("es.nativelibs.path", libraryPath);
});
}
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/EmbeddedProviderExtension.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/EmbeddedProviderExtension.java
index e9e75a711a8ff..03b8f19d10b13 100644
--- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/EmbeddedProviderExtension.java
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/EmbeddedProviderExtension.java
@@ -43,17 +43,16 @@ void impl(String implName, Project implProject) {
});
String manifestTaskName = "generate" + capitalName + "ProviderManifest";
- Provider<Directory> generatedResourcesDir = project.getLayout().getBuildDirectory().dir("generated-resources");
+ Provider<Directory> generatedResourcesRoot = project.getLayout().getBuildDirectory().dir("generated-resources");
var generateProviderManifest = project.getTasks().register(manifestTaskName, GenerateProviderManifest.class);
generateProviderManifest.configure(t -> {
- t.getManifestFile().set(generatedResourcesDir.map(d -> d.file("LISTING.TXT")));
+ t.getManifestFile().set(generatedResourcesRoot.map(d -> d.dir(manifestTaskName).file("LISTING.TXT")));
t.getProviderImplClasspath().from(implConfig);
});
-
String implTaskName = "generate" + capitalName + "ProviderImpl";
var generateProviderImpl = project.getTasks().register(implTaskName, Sync.class);
generateProviderImpl.configure(t -> {
- t.into(generatedResourcesDir);
+ t.into(generatedResourcesRoot.map(d -> d.dir(implTaskName)));
t.into("IMPL-JARS/" + implName, spec -> {
spec.from(implConfig);
spec.from(generateProviderManifest);
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/BuildParams.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/BuildParams.java
index b090f05c14c83..964784643936b 100644
--- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/BuildParams.java
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/BuildParams.java
@@ -26,15 +26,15 @@
import static java.util.Objects.requireNonNull;
public class BuildParams {
- private static File runtimeJavaHome;
+ private static Provider<File> runtimeJavaHome;
private static Boolean isRuntimeJavaHomeSet;
private static List<JavaHome> javaVersions;
private static JavaVersion minimumCompilerVersion;
private static JavaVersion minimumRuntimeVersion;
private static JavaVersion gradleJavaVersion;
- private static JavaVersion runtimeJavaVersion;
+ private static Provider<JavaVersion> runtimeJavaVersion;
private static Provider<? extends Action<JavaToolchainSpec>> javaToolChainSpec;
- private static String runtimeJavaDetails;
+ private static Provider<String> runtimeJavaDetails;
private static Boolean inFipsJvm;
private static String gitRevision;
private static String gitOrigin;
@@ -58,7 +58,7 @@ public static void init(Consumer initializer) {
}
public static File getRuntimeJavaHome() {
- return value(runtimeJavaHome);
+ return value(runtimeJavaHome).get();
}
public static Boolean getIsRuntimeJavaHomeSet() {
@@ -82,11 +82,11 @@ public static JavaVersion getGradleJavaVersion() {
}
public static JavaVersion getRuntimeJavaVersion() {
- return value(runtimeJavaVersion);
+ return value(runtimeJavaVersion.get());
}
public static String getRuntimeJavaDetails() {
- return value(runtimeJavaDetails);
+ return value(runtimeJavaDetails.get());
}
public static Boolean isInFipsJvm() {
@@ -126,7 +126,7 @@ public static Boolean isCi() {
}
public static Boolean isGraalVmRuntime() {
- return value(runtimeJavaDetails.toLowerCase().contains("graalvm"));
+ return value(runtimeJavaDetails.get().toLowerCase().contains("graalvm"));
}
public static Integer getDefaultParallel() {
@@ -182,16 +182,18 @@ public void reset() {
});
}
- public void setRuntimeJavaHome(File runtimeJavaHome) {
- try {
- BuildParams.runtimeJavaHome = requireNonNull(runtimeJavaHome).getCanonicalFile();
- } catch (IOException e) {
- throw new RuntimeException(e);
- }
+ public void setRuntimeJavaHome(Provider<File> runtimeJavaHome) {
+ BuildParams.runtimeJavaHome = runtimeJavaHome.map(javaHome -> {
+ try {
+ return javaHome.getCanonicalFile();
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ }
+ });
}
- public void setIsRuntimeJavaHomeSet(boolean isRutimeJavaHomeSet) {
- BuildParams.isRuntimeJavaHomeSet = isRutimeJavaHomeSet;
+ public void setIsRuntimeJavaHomeSet(boolean isRuntimeJavaHomeSet) {
+ BuildParams.isRuntimeJavaHomeSet = isRuntimeJavaHomeSet;
}
public void setJavaVersions(List<JavaHome> javaVersions) {
@@ -210,11 +212,11 @@ public void setGradleJavaVersion(JavaVersion gradleJavaVersion) {
BuildParams.gradleJavaVersion = requireNonNull(gradleJavaVersion);
}
- public void setRuntimeJavaVersion(JavaVersion runtimeJavaVersion) {
+ public void setRuntimeJavaVersion(Provider<JavaVersion> runtimeJavaVersion) {
BuildParams.runtimeJavaVersion = requireNonNull(runtimeJavaVersion);
}
- public void setRuntimeJavaDetails(String runtimeJavaDetails) {
+ public void setRuntimeJavaDetails(Provider<String> runtimeJavaDetails) {
BuildParams.runtimeJavaDetails = runtimeJavaDetails;
}
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/GlobalBuildInfoPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/GlobalBuildInfoPlugin.java
index e61bbefc9a973..b287815854098 100644
--- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/GlobalBuildInfoPlugin.java
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/GlobalBuildInfoPlugin.java
@@ -8,6 +8,7 @@
package org.elasticsearch.gradle.internal.info;
import org.apache.commons.io.IOUtils;
+import org.elasticsearch.gradle.VersionProperties;
import org.elasticsearch.gradle.internal.BwcVersions;
import org.elasticsearch.gradle.internal.conventions.info.GitInfo;
import org.elasticsearch.gradle.internal.conventions.info.ParallelDetector;
@@ -31,7 +32,6 @@
import org.gradle.internal.jvm.inspection.JvmMetadataDetector;
import org.gradle.internal.jvm.inspection.JvmVendor;
import org.gradle.jvm.toolchain.JavaLanguageVersion;
-import org.gradle.jvm.toolchain.JavaLauncher;
import org.gradle.jvm.toolchain.JavaToolchainService;
import org.gradle.jvm.toolchain.JavaToolchainSpec;
import org.gradle.jvm.toolchain.JvmVendorSpec;
@@ -48,10 +48,8 @@
import java.nio.file.Files;
import java.time.ZoneOffset;
import java.time.ZonedDateTime;
-import java.util.Arrays;
import java.util.List;
import java.util.Locale;
-import java.util.Optional;
import java.util.Random;
import java.util.concurrent.atomic.AtomicReference;
import java.util.stream.Collectors;
@@ -98,9 +96,11 @@ public void apply(Project project) {
JavaVersion minimumCompilerVersion = JavaVersion.toVersion(getResourceContents("/minimumCompilerVersion"));
JavaVersion minimumRuntimeVersion = JavaVersion.toVersion(getResourceContents("/minimumRuntimeVersion"));
- Optional<File> selectedRuntimeJavaHome = findRuntimeJavaHome();
- File actualRuntimeJavaHome = selectedRuntimeJavaHome.orElse(Jvm.current().getJavaHome());
- boolean isRuntimeJavaHomeSet = selectedRuntimeJavaHome.isPresent();
+ Provider<File> explicitRuntimeJavaHome = findRuntimeJavaHome();
+ boolean isExplicitRuntimeJavaHomeSet = explicitRuntimeJavaHome.isPresent();
+ Provider<File> actualRuntimeJavaHome = isExplicitRuntimeJavaHomeSet
+ ? explicitRuntimeJavaHome
+ : resolveJavaHomeFromToolChainService(VersionProperties.getBundledJdkMajorVersion());
GitInfo gitInfo = GitInfo.gitInfo(project.getRootDir());
@@ -108,16 +108,22 @@ public void apply(Project project) {
params.reset();
params.setRuntimeJavaHome(actualRuntimeJavaHome);
params.setJavaToolChainSpec(resolveToolchainSpecFromEnv());
+ Provider<JvmInstallationMetadata> runtimeJdkMetaData = actualRuntimeJavaHome.map(
+ runtimeJavaHome -> metadataDetector.getMetadata(getJavaInstallation(runtimeJavaHome))
+ );
params.setRuntimeJavaVersion(
- determineJavaVersion(
- "runtime java.home",
- actualRuntimeJavaHome,
- isRuntimeJavaHomeSet ? minimumRuntimeVersion : Jvm.current().getJavaVersion()
+ actualRuntimeJavaHome.map(
+ javaHome -> determineJavaVersion(
+ "runtime java.home",
+ javaHome,
+ isExplicitRuntimeJavaHomeSet
+ ? minimumRuntimeVersion
+ : JavaVersion.toVersion(VersionProperties.getBundledJdkMajorVersion())
+ )
)
);
- params.setIsRuntimeJavaHomeSet(isRuntimeJavaHomeSet);
- JvmInstallationMetadata runtimeJdkMetaData = metadataDetector.getMetadata(getJavaInstallation(actualRuntimeJavaHome));
- params.setRuntimeJavaDetails(formatJavaVendorDetails(runtimeJdkMetaData));
+ params.setIsRuntimeJavaHomeSet(isExplicitRuntimeJavaHomeSet);
+ params.setRuntimeJavaDetails(runtimeJdkMetaData.map(m -> formatJavaVendorDetails(m)));
params.setJavaVersions(getAvailableJavaVersions());
params.setMinimumCompilerVersion(minimumCompilerVersion);
params.setMinimumRuntimeVersion(minimumRuntimeVersion);
@@ -300,62 +306,30 @@ private static void assertMinimumCompilerVersion(JavaVersion minimumCompilerVers
}
}
- private Optional<File> findRuntimeJavaHome() {
+ private Provider<File> findRuntimeJavaHome() {
String runtimeJavaProperty = System.getProperty("runtime.java");
if (runtimeJavaProperty != null) {
- return Optional.of(resolveJavaHomeFromToolChainService(runtimeJavaProperty));
+ return resolveJavaHomeFromToolChainService(runtimeJavaProperty);
}
- String env = System.getenv("RUNTIME_JAVA_HOME");
- if (env != null) {
- return Optional.of(new File(env));
+ if (System.getenv("RUNTIME_JAVA_HOME") != null) {
+ return providers.provider(() -> new File(System.getenv("RUNTIME_JAVA_HOME")));
}
// fall back to tool chain if set.
- env = System.getenv("JAVA_TOOLCHAIN_HOME");
- return env == null ? Optional.empty() : Optional.of(new File(env));
- }
-
- @NotNull
- private String resolveJavaHomeFromEnvVariable(String javaHomeEnvVar) {
- Provider<String> javaHomeNames = providers.gradleProperty("org.gradle.java.installations.fromEnv");
- // Provide a useful error if we're looking for a Java home version that we haven't told Gradle about yet
- Arrays.stream(javaHomeNames.get().split(","))
- .filter(s -> s.equals(javaHomeEnvVar))
- .findFirst()
- .orElseThrow(
- () -> new GradleException(
- "Environment variable '"
- + javaHomeEnvVar
- + "' is not registered with Gradle installation supplier. Ensure 'org.gradle.java.installations.fromEnv' is "
- + "updated in gradle.properties file."
- )
- );
- String versionedJavaHome = System.getenv(javaHomeEnvVar);
- if (versionedJavaHome == null) {
- final String exceptionMessage = String.format(
- Locale.ROOT,
- "$%s must be set to build Elasticsearch. "
- + "Note that if the variable was just set you "
- + "might have to run `./gradlew --stop` for "
- + "it to be picked up. See https://github.com/elastic/elasticsearch/issues/31399 details.",
- javaHomeEnvVar
- );
- throw new GradleException(exceptionMessage);
- }
- return versionedJavaHome;
+ String env = System.getenv("JAVA_TOOLCHAIN_HOME");
+ return providers.provider(() -> {
+ if (env == null) {
+ return null;
+ }
+ return new File(env);
+ });
}
@NotNull
- private File resolveJavaHomeFromToolChainService(String version) {
+ private Provider<File> resolveJavaHomeFromToolChainService(String version) {
Property<JavaLanguageVersion> value = objectFactory.property(JavaLanguageVersion.class).value(JavaLanguageVersion.of(version));
- Provider<JavaLauncher> javaLauncherProvider = toolChainService.launcherFor(javaToolchainSpec -> {
- javaToolchainSpec.getLanguageVersion().value(value);
- });
- return javaLauncherProvider.get().getMetadata().getInstallationPath().getAsFile();
- }
-
- private static String getJavaHomeEnvVarName(String version) {
- return "JAVA" + version + "_HOME";
+ return toolChainService.launcherFor(javaToolchainSpec -> javaToolchainSpec.getLanguageVersion().value(value))
+ .map(launcher -> launcher.getMetadata().getInstallationPath().getAsFile());
}
public static String getResourceContents(String resourcePath) {
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/LicenseAnalyzer.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/LicenseAnalyzer.java
index c52ea9aaeb6f5..533066168c604 100644
--- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/LicenseAnalyzer.java
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/LicenseAnalyzer.java
@@ -102,7 +102,7 @@ public class LicenseAnalyzer {
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- SOFTWARE\\.
+ SOFTWARE\\.?
""").replaceAll("\\s+", "\\\\s*"), Pattern.DOTALL)),
new LicenseMatcher(
"MIT-0",
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditPrecommitPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditPrecommitPlugin.java
index 1fc030be42480..9e40d96438e48 100644
--- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditPrecommitPlugin.java
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditPrecommitPlugin.java
public TaskProvider<? extends Task> createTask(Project project) {
)
);
t.dependsOn(resourcesTask);
- if (BuildParams.getIsRuntimeJavaHomeSet()) {
- t.getJavaHome().set(project.provider(BuildParams::getRuntimeJavaHome).map(File::getPath));
- }
t.getTargetCompatibility().set(project.provider(BuildParams::getRuntimeJavaVersion));
+ t.getJavaHome().set(project.provider(BuildParams::getRuntimeJavaHome).map(File::getPath));
t.setSignatureFile(resourcesDir.resolve("forbidden/third-party-audit.txt").toFile());
t.getJdkJarHellClasspath().from(jdkJarHellConfig);
t.getForbiddenAPIsClasspath().from(project.getConfigurations().getByName("forbiddenApisCliJar").plus(compileOnly));
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ReleaseToolsPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ReleaseToolsPlugin.java
index 08abb02ea831e..ec79fe20492e1 100644
--- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ReleaseToolsPlugin.java
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ReleaseToolsPlugin.java
@@ -52,7 +52,7 @@ public void apply(Project project) {
project.getTasks().register("extractCurrentVersions", ExtractCurrentVersionsTask.class);
project.getTasks().register("tagVersions", TagVersionsTask.class);
- project.getTasks().register("setCompatibleVersions", SetCompatibleVersionsTask.class);
+ project.getTasks().register("setCompatibleVersions", SetCompatibleVersionsTask.class, t -> t.setThisVersion(version));
final FileTree yamlFiles = projectDirectory.dir("docs/changelog")
.getAsFileTree()
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/SetCompatibleVersionsTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/SetCompatibleVersionsTask.java
index 15e0a0cc345d5..17761e5183b31 100644
--- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/SetCompatibleVersionsTask.java
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/SetCompatibleVersionsTask.java
@@ -14,6 +14,7 @@
import com.github.javaparser.ast.expr.NameExpr;
import com.github.javaparser.printer.lexicalpreservation.LexicalPreservingPrinter;
+import org.elasticsearch.gradle.Version;
import org.gradle.api.tasks.TaskAction;
import org.gradle.api.tasks.options.Option;
import org.gradle.initialization.layout.BuildLayout;
@@ -28,6 +29,8 @@
public class SetCompatibleVersionsTask extends AbstractVersionsTask {
+ private Version thisVersion;
+ private Version releaseVersion;
private Map<String, Integer> versionIds = Map.of();
@Inject
@@ -35,21 +38,35 @@ public SetCompatibleVersionsTask(BuildLayout layout) {
super(layout);
}
+ public void setThisVersion(Version version) {
+ thisVersion = version;
+ }
+
@Option(option = "version-id", description = "Version id used for the release. Of the form :.")
public void versionIds(List<String> version) {
this.versionIds = splitVersionIds(version);
}
+ @Option(option = "release", description = "The version being released")
+ public void releaseVersion(String version) {
+ releaseVersion = Version.fromString(version);
+ }
+
@TaskAction
public void executeTask() throws IOException {
if (versionIds.isEmpty()) {
throw new IllegalArgumentException("No version ids specified");
}
+
+ if (releaseVersion.getMajor() < thisVersion.getMajor()) {
+ // don't need to update CCS version - this is for a different major
+ return;
+ }
+
Integer transportVersion = versionIds.get(TRANSPORT_VERSION_TYPE);
if (transportVersion == null) {
throw new IllegalArgumentException("TransportVersion id not specified");
}
-
Path versionJava = rootDir.resolve(TRANSPORT_VERSIONS_FILE_PATH);
CompilationUnit file = LexicalPreservingPrinter.setup(StaticJavaParser.parse(versionJava));
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/TestUtil.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/TestUtil.java
index 96fde95d0dd17..965f3964c9a38 100644
--- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/TestUtil.java
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/TestUtil.java
@@ -11,7 +11,6 @@
import org.elasticsearch.gradle.Architecture;
import org.elasticsearch.gradle.ElasticsearchDistribution;
-import java.io.File;
import java.util.Locale;
public class TestUtil {
@@ -19,8 +18,7 @@ public class TestUtil {
public static String getTestLibraryPath(String nativeLibsDir) {
String arch = Architecture.current().toString().toLowerCase(Locale.ROOT);
String platform = String.format(Locale.ROOT, "%s-%s", ElasticsearchDistribution.CURRENT_PLATFORM, arch);
- String existingLibraryPath = System.getProperty("java.library.path");
- return String.format(Locale.ROOT, "%s/%s%c%s", nativeLibsDir, platform, File.pathSeparatorChar, existingLibraryPath);
+ return String.format(Locale.ROOT, "%s/%s", nativeLibsDir, platform);
}
}
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/util/SourceDirectoryCommandLineArgumentProvider.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/util/SourceDirectoryCommandLineArgumentProvider.java
new file mode 100644
index 0000000000000..30141f021935b
--- /dev/null
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/util/SourceDirectoryCommandLineArgumentProvider.java
@@ -0,0 +1,36 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.gradle.internal.util;
+
+import org.gradle.api.file.Directory;
+import org.gradle.api.tasks.InputDirectory;
+import org.gradle.api.tasks.PathSensitive;
+import org.gradle.api.tasks.PathSensitivity;
+import org.gradle.process.CommandLineArgumentProvider;
+
+import java.util.Arrays;
+
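+/**
+ * Exposes a source directory both as a "-s <dir>" command line argument and as a tracked task
+ * input, so that changes to the directory are visible to Gradle's up-to-date checks.
+ */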
+public class SourceDirectoryCommandLineArgumentProvider implements CommandLineArgumentProvider {
+
+ private final Directory sourceDirectory;
+
+ public SourceDirectoryCommandLineArgumentProvider(Directory sourceDirectory) {
+ this.sourceDirectory = sourceDirectory;
+ }
+
+ public Iterable<String> asArguments() {
+ return Arrays.asList("-s", sourceDirectory.getAsFile().getAbsolutePath());
+ }
+
+ @InputDirectory
+ @PathSensitive(PathSensitivity.RELATIVE)
+ public Directory getSourceDirectory() {
+ return sourceDirectory;
+ }
+}
diff --git a/build-tools-internal/src/main/resources/minimumGradleVersion b/build-tools-internal/src/main/resources/minimumGradleVersion
index f7b1c8ff61774..8d04a0f38fab0 100644
--- a/build-tools-internal/src/main/resources/minimumGradleVersion
+++ b/build-tools-internal/src/main/resources/minimumGradleVersion
@@ -1 +1 @@
-8.9
\ No newline at end of file
+8.10
\ No newline at end of file
diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/LoggedExec.java b/build-tools/src/main/java/org/elasticsearch/gradle/LoggedExec.java
index 6087482db278d..3a425d11ccf17 100644
--- a/build-tools/src/main/java/org/elasticsearch/gradle/LoggedExec.java
+++ b/build-tools/src/main/java/org/elasticsearch/gradle/LoggedExec.java
@@ -17,6 +17,8 @@
import org.gradle.api.provider.ListProperty;
import org.gradle.api.provider.MapProperty;
import org.gradle.api.provider.Property;
+import org.gradle.api.provider.Provider;
+import org.gradle.api.provider.ProviderFactory;
import org.gradle.api.tasks.Input;
import org.gradle.api.tasks.Internal;
import org.gradle.api.tasks.Optional;
@@ -92,17 +94,45 @@ public abstract class LoggedExec extends DefaultTask implements FileSystemOperat
private String output;
@Inject
- public LoggedExec(ProjectLayout projectLayout, ExecOperations execOperations, FileSystemOperations fileSystemOperations) {
+ public LoggedExec(
+ ProjectLayout projectLayout,
+ ExecOperations execOperations,
+ FileSystemOperations fileSystemOperations,
+ ProviderFactory providerFactory
+ ) {
this.projectLayout = projectLayout;
this.execOperations = execOperations;
this.fileSystemOperations = fileSystemOperations;
getWorkingDir().convention(projectLayout.getProjectDirectory().getAsFile());
// For now mimic default behaviour of Gradle Exec task here
- getEnvironment().putAll(System.getenv());
+ setupDefaultEnvironment(providerFactory);
getCaptureOutput().convention(false);
getSpoolOutput().convention(false);
}
+ /**
+ * We explicitly configure the environment variables that are passed to the executed process.
+ * This is required to make sure that the build cache and the Gradle configuration cache are
+ * correctly configured and can be reused across different build invocations.
+ */
+ private void setupDefaultEnvironment(ProviderFactory providerFactory) {
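+ // pass through only an explicit allow-list of variables instead of the full System.getenv()
+ // map, whose contents vary between invocations and would defeat caching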
+ getEnvironment().putAll(providerFactory.environmentVariablesPrefixedBy("BUILDKITE"));
+ getEnvironment().putAll(providerFactory.environmentVariablesPrefixedBy("GRADLE_BUILD_CACHE"));
+ getEnvironment().putAll(providerFactory.environmentVariablesPrefixedBy("VAULT"));
+ Provider<String> javaToolchainHome = providerFactory.environmentVariable("JAVA_TOOLCHAIN_HOME");
+ if (javaToolchainHome.isPresent()) {
+ getEnvironment().put("JAVA_TOOLCHAIN_HOME", javaToolchainHome);
+ }
+ Provider<String> javaRuntimeHome = providerFactory.environmentVariable("RUNTIME_JAVA_HOME");
+ if (javaRuntimeHome.isPresent()) {
+ getEnvironment().put("RUNTIME_JAVA_HOME", javaRuntimeHome);
+ }
+ Provider<String> path = providerFactory.environmentVariable("PATH");
+ if (path.isPresent()) {
+ getEnvironment().put("PATH", path);
+ }
+ }
+
@TaskAction
public void run() {
boolean spoolOutput = getSpoolOutput().get();
diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/test/GradleTestPolicySetupPlugin.java b/build-tools/src/main/java/org/elasticsearch/gradle/test/GradleTestPolicySetupPlugin.java
index a1da860abe26a..9593a281686e7 100644
--- a/build-tools/src/main/java/org/elasticsearch/gradle/test/GradleTestPolicySetupPlugin.java
+++ b/build-tools/src/main/java/org/elasticsearch/gradle/test/GradleTestPolicySetupPlugin.java
@@ -22,6 +22,9 @@ public void apply(Project project) {
test.systemProperty("tests.gradle", true);
test.systemProperty("tests.task", test.getPath());
+ // Flag is required for later Java versions since our tests use a custom security manager
+ test.jvmArgs("-Djava.security.manager=allow");
+
SystemPropertyCommandLineArgumentProvider nonInputProperties = new SystemPropertyCommandLineArgumentProvider();
// don't track these as inputs since they contain absolute paths and break cache relocatability
nonInputProperties.systemProperty("gradle.dist.lib", gradle.getGradleHomeDir().getAbsolutePath() + "/lib");
diff --git a/build-tools/src/testFixtures/groovy/org/elasticsearch/gradle/fixtures/AbstractGradleFuncTest.groovy b/build-tools/src/testFixtures/groovy/org/elasticsearch/gradle/fixtures/AbstractGradleFuncTest.groovy
index 41f6f445f58ec..5ea970c533474 100644
--- a/build-tools/src/testFixtures/groovy/org/elasticsearch/gradle/fixtures/AbstractGradleFuncTest.groovy
+++ b/build-tools/src/testFixtures/groovy/org/elasticsearch/gradle/fixtures/AbstractGradleFuncTest.groovy
@@ -13,6 +13,7 @@ import org.elasticsearch.gradle.internal.test.BuildConfigurationAwareGradleRunne
import org.elasticsearch.gradle.internal.test.InternalAwareGradleRunner
import org.elasticsearch.gradle.internal.test.NormalizeOutputGradleRunner
import org.elasticsearch.gradle.internal.test.TestResultExtension
+import org.gradle.internal.component.external.model.ComponentVariant
import org.gradle.testkit.runner.BuildResult
import org.gradle.testkit.runner.GradleRunner
import org.junit.Rule
@@ -22,6 +23,7 @@ import spock.lang.TempDir
import java.lang.management.ManagementFactory
import java.nio.file.Files
+import java.io.File
import java.nio.file.Path
import java.util.jar.JarEntry
import java.util.jar.JarOutputStream
diff --git a/catalog-info.yaml b/catalog-info.yaml
index dfeeae51c1b3a..e57841c9de268 100644
--- a/catalog-info.yaml
+++ b/catalog-info.yaml
@@ -125,7 +125,7 @@ spec:
ELASTIC_SLACK_NOTIFICATIONS_ENABLED: "true"
SLACK_NOTIFICATIONS_CHANNEL: "#lucene"
SLACK_NOTIFICATIONS_ALL_BRANCHES: "true"
- branch_configuration: lucene_snapshot
+ branch_configuration: lucene_snapshot lucene_snapshot_10
default_branch: lucene_snapshot
teams:
elasticsearch-team: {}
@@ -142,6 +142,10 @@ spec:
branch: lucene_snapshot
cronline: "0 2 * * * America/New_York"
message: "Builds a new lucene snapshot 1x per day"
+ Periodically on lucene_snapshot_10:
+ branch: lucene_snapshot_10
+ cronline: "0 2 * * * America/New_York"
+ message: "Builds a new lucene snapshot 1x per day"
---
# yaml-language-server: $schema=https://gist.githubusercontent.com/elasticmachine/988b80dae436cafea07d9a4a460a011d/raw/e57ee3bed7a6f73077a3f55a38e76e40ec87a7cf/rre.schema.json
apiVersion: backstage.io/v1alpha1
@@ -169,7 +173,7 @@ spec:
ELASTIC_SLACK_NOTIFICATIONS_ENABLED: "true"
SLACK_NOTIFICATIONS_CHANNEL: "#lucene"
SLACK_NOTIFICATIONS_ALL_BRANCHES: "true"
- branch_configuration: lucene_snapshot
+ branch_configuration: lucene_snapshot lucene_snapshot_10
default_branch: lucene_snapshot
teams:
elasticsearch-team: {}
@@ -186,6 +190,10 @@ spec:
branch: lucene_snapshot
cronline: "0 6 * * * America/New_York"
message: "Merges main into lucene_snapshot branch 1x per day"
+ Periodically on lucene_snapshot_10:
+ branch: lucene_snapshot_10
+ cronline: "0 6 * * * America/New_York"
+ message: "Merges main into lucene_snapshot_10 branch 1x per day"
---
# yaml-language-server: $schema=https://gist.githubusercontent.com/elasticmachine/988b80dae436cafea07d9a4a460a011d/raw/e57ee3bed7a6f73077a3f55a38e76e40ec87a7cf/rre.schema.json
apiVersion: backstage.io/v1alpha1
@@ -213,7 +221,7 @@ spec:
ELASTIC_SLACK_NOTIFICATIONS_ENABLED: "true"
SLACK_NOTIFICATIONS_CHANNEL: "#lucene"
SLACK_NOTIFICATIONS_ALL_BRANCHES: "true"
- branch_configuration: lucene_snapshot
+ branch_configuration: lucene_snapshot lucene_snapshot_10
default_branch: lucene_snapshot
teams:
elasticsearch-team: {}
@@ -230,6 +238,10 @@ spec:
branch: lucene_snapshot
cronline: "0 9,12,15,18 * * * America/New_York"
message: "Runs tests against lucene_snapshot branch several times per day"
+ Periodically on lucene_snapshot_10:
+ branch: lucene_snapshot_10
+ cronline: "0 9,12,15,18 * * * America/New_York"
+ message: "Runs tests against lucene_snapshot_10 branch several times per day"
---
# yaml-language-server: $schema=https://gist.githubusercontent.com/elasticmachine/988b80dae436cafea07d9a4a460a011d/raw/e57ee3bed7a6f73077a3f55a38e76e40ec87a7cf/rre.schema.json
apiVersion: backstage.io/v1alpha1
diff --git a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/TransportNoopBulkAction.java b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/TransportNoopBulkAction.java
index 9896fd6c84599..01ec6c118bf24 100644
--- a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/TransportNoopBulkAction.java
+++ b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/TransportNoopBulkAction.java
@@ -16,9 +16,9 @@
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.action.update.UpdateResponse;
-import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.util.concurrent.EsExecutors;
import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.injection.guice.Inject;
import org.elasticsearch.plugin.noop.NoopPlugin;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.transport.TransportService;
diff --git a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/TransportNoopSearchAction.java b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/TransportNoopSearchAction.java
index 790b6bfd6deca..871cdb860a9a9 100644
--- a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/TransportNoopSearchAction.java
+++ b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/TransportNoopSearchAction.java
@@ -13,9 +13,9 @@
import org.elasticsearch.action.search.ShardSearchFailure;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
-import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.util.concurrent.EsExecutors;
+import org.elasticsearch.injection.guice.Inject;
import org.elasticsearch.plugin.noop.NoopPlugin;
import org.elasticsearch.search.SearchHits;
import org.elasticsearch.search.aggregations.InternalAggregations;
diff --git a/distribution/docker/src/docker/Dockerfile b/distribution/docker/src/docker/Dockerfile
index 32f35b05015b9..2a2a77a6df820 100644
--- a/distribution/docker/src/docker/Dockerfile
+++ b/distribution/docker/src/docker/Dockerfile
@@ -22,7 +22,7 @@
<% if (docker_base == 'iron_bank') { %>
ARG BASE_REGISTRY=registry1.dso.mil
ARG BASE_IMAGE=ironbank/redhat/ubi/ubi9
-ARG BASE_TAG=9.3
+ARG BASE_TAG=9.4
<% } %>
################################################################################
diff --git a/distribution/docker/src/docker/iron_bank/hardening_manifest.yaml b/distribution/docker/src/docker/iron_bank/hardening_manifest.yaml
index 38ce16a413af2..f4364c5008c09 100644
--- a/distribution/docker/src/docker/iron_bank/hardening_manifest.yaml
+++ b/distribution/docker/src/docker/iron_bank/hardening_manifest.yaml
@@ -14,7 +14,7 @@ tags:
# Build args passed to Dockerfile ARGs
args:
BASE_IMAGE: "redhat/ubi/ubi9"
- BASE_TAG: "9.3"
+ BASE_TAG: "9.4"
# Docker image labels
labels:
diff --git a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/ServerCli.java b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/ServerCli.java
index 7b904d4cb5a89..bea7fbb7f63e8 100644
--- a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/ServerCli.java
+++ b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/ServerCli.java
@@ -32,6 +32,7 @@
import java.nio.file.Path;
import java.util.Arrays;
import java.util.Locale;
+import java.util.concurrent.atomic.AtomicBoolean;
/**
* The main CLI for running Elasticsearch.
@@ -44,6 +45,8 @@ class ServerCli extends EnvironmentAwareCommand {
private final OptionSpecBuilder quietOption;
private final OptionSpec<String> enrollmentTokenOption;
+ // flag for indicating shutdown has begun. we use an AtomicBoolean to double as a synchronization object
+ private final AtomicBoolean shuttingDown = new AtomicBoolean(false);
private volatile ServerProcess server;
// visible for testing
@@ -98,7 +101,14 @@ public void execute(Terminal terminal, OptionSet options, Environment env, Proce
syncPlugins(terminal, env, processInfo);
ServerArgs args = createArgs(options, env, secrets, processInfo);
- this.server = startServer(terminal, processInfo, args);
+ synchronized (shuttingDown) {
+ // if we are shutting down there is no reason to start the server
+ if (shuttingDown.get()) {
+ terminal.println("CLI is shutting down, skipping starting server process");
+ return;
+ }
+ this.server = startServer(terminal, processInfo, args);
+ }
}
if (options.has(daemonizeOption)) {
@@ -233,8 +243,11 @@ private ServerArgs createArgs(OptionSet options, Environment env, SecureSettings
@Override
public void close() throws IOException {
- if (server != null) {
- server.stop();
+ synchronized (shuttingDown) {
+ shuttingDown.set(true);
+ if (server != null) {
+ server.stop();
+ }
}
}
diff --git a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java
index 2a89f18209d11..94e2d538c0ad0 100644
--- a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java
+++ b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java
@@ -10,11 +10,7 @@
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.EsExecutors;
-import org.elasticsearch.core.SuppressForbidden;
-import java.io.File;
-import java.nio.file.Path;
-import java.nio.file.Paths;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
@@ -25,7 +21,6 @@ final class SystemJvmOptions {
static List<String> systemJvmOptions(Settings nodeSettings, final Map<String, String> sysprops) {
String distroType = sysprops.get("es.distribution.type");
boolean isHotspot = sysprops.getOrDefault("sun.management.compiler", "").contains("HotSpot");
- String libraryPath = findLibraryPath(sysprops);
return Stream.concat(
Stream.of(
@@ -73,8 +68,6 @@ static List<String> systemJvmOptions(Settings nodeSettings, final Map<String, String> sysprops) {
@@ ... @@ private static Stream<String> maybeWorkaroundG1Bug() {
}
return Stream.of();
}
-
- private static String findLibraryPath(Map<String, String> sysprops) {
- // working dir is ES installation, so we use relative path here
- Path platformDir = Paths.get("lib", "platform");
- String existingPath = sysprops.get("java.library.path");
- assert existingPath != null;
-
- String osname = sysprops.get("os.name");
- String os;
- if (osname.startsWith("Windows")) {
- os = "windows";
- } else if (osname.startsWith("Linux")) {
- os = "linux";
- } else if (osname.startsWith("Mac OS")) {
- os = "darwin";
- } else {
- os = "unsupported_os[" + osname + "]";
- }
- String archname = sysprops.get("os.arch");
- String arch;
- if (archname.equals("amd64") || archname.equals("x86_64")) {
- arch = "x64";
- } else if (archname.equals("aarch64")) {
- arch = archname;
- } else {
- arch = "unsupported_arch[" + archname + "]";
- }
- return platformDir.resolve(os + "-" + arch).toAbsolutePath() + getPathSeparator() + existingPath;
- }
-
- @SuppressForbidden(reason = "no way to get path separator with nio")
- private static String getPathSeparator() {
- return File.pathSeparator;
- }
}
diff --git a/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/JvmOptionsParserTests.java b/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/JvmOptionsParserTests.java
index 87b7894a9135a..fc889f036a795 100644
--- a/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/JvmOptionsParserTests.java
+++ b/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/JvmOptionsParserTests.java
@@ -17,7 +17,6 @@
import java.io.BufferedReader;
import java.io.IOException;
import java.io.StringReader;
-import java.nio.file.FileSystems;
import java.nio.file.Files;
import java.nio.file.NoSuchFileException;
import java.nio.file.Path;
@@ -30,12 +29,10 @@
import java.util.Map;
import java.util.concurrent.atomic.AtomicBoolean;
-import static org.hamcrest.Matchers.allOf;
import static org.hamcrest.Matchers.contains;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.empty;
import static org.hamcrest.Matchers.equalTo;
-import static org.hamcrest.Matchers.hasEntry;
import static org.hamcrest.Matchers.hasItem;
import static org.hamcrest.Matchers.hasKey;
import static org.hamcrest.Matchers.hasSize;
@@ -44,14 +41,7 @@
@WithoutSecurityManager
public class JvmOptionsParserTests extends ESTestCase {
- private static final Map<String, String> TEST_SYSPROPS = Map.of(
- "os.name",
- "Linux",
- "os.arch",
- "aarch64",
- "java.library.path",
- "/usr/lib"
- );
+ private static final Map<String, String> TEST_SYSPROPS = Map.of("os.name", "Linux", "os.arch", "aarch64");
public void testSubstitution() {
final List<String> jvmOptions = JvmOptionsParser.substitutePlaceholders(
@@ -390,40 +380,4 @@ public void testCommandLineDistributionType() {
final List<String> jvmOptions = SystemJvmOptions.systemJvmOptions(Settings.EMPTY, sysprops);
assertThat(jvmOptions, hasItem("-Des.distribution.type=testdistro"));
}
-
- public void testLibraryPath() {
- assertLibraryPath("Mac OS", "aarch64", "darwin-aarch64");
- assertLibraryPath("Mac OS", "amd64", "darwin-x64");
- assertLibraryPath("Mac OS", "x86_64", "darwin-x64");
- assertLibraryPath("Linux", "aarch64", "linux-aarch64");
- assertLibraryPath("Linux", "amd64", "linux-x64");
- assertLibraryPath("Linux", "x86_64", "linux-x64");
- assertLibraryPath("Windows", "amd64", "windows-x64");
- assertLibraryPath("Windows", "x86_64", "windows-x64");
- assertLibraryPath("Unknown", "aarch64", "unsupported_os[Unknown]-aarch64");
- assertLibraryPath("Mac OS", "Unknown", "darwin-unsupported_arch[Unknown]");
- }
-
- private void assertLibraryPath(String os, String arch, String expected) {
- String existingPath = "/usr/lib";
- var sysprops = Map.of("os.name", os, "os.arch", arch, "java.library.path", existingPath);
- final List<String> jvmOptions = SystemJvmOptions.systemJvmOptions(Settings.EMPTY, sysprops);
- Map<String, String> options = new HashMap<>();
- for (var jvmOption : jvmOptions) {
- if (jvmOption.startsWith("-D")) {
- String[] parts = jvmOption.substring(2).split("=");
- assert parts.length == 2;
- options.put(parts[0], parts[1]);
- }
- }
- String separator = FileSystems.getDefault().getSeparator();
- assertThat(
- options,
- hasEntry(equalTo("java.library.path"), allOf(containsString("platform" + separator + expected), containsString(existingPath)))
- );
- assertThat(
- options,
- hasEntry(equalTo("jna.library.path"), allOf(containsString("platform" + separator + expected), containsString(existingPath)))
- );
- }
}
diff --git a/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/ServerCliTests.java b/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/ServerCliTests.java
index 38a64a778fc27..e603790051c0c 100644
--- a/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/ServerCliTests.java
+++ b/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/ServerCliTests.java
@@ -36,6 +36,8 @@
import java.util.List;
import java.util.Locale;
import java.util.Optional;
+import java.util.concurrent.BrokenBarrierException;
+import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Consumer;
@@ -50,6 +52,7 @@
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.matchesRegex;
import static org.hamcrest.Matchers.not;
+import static org.hamcrest.Matchers.sameInstance;
public class ServerCliTests extends CommandTestCase {
@@ -383,6 +386,52 @@ public void testSecureSettingsLoaderWithNullPassword() throws Exception {
assertEquals("", loader.password);
}
+ public void testProcessCreationRace() throws Exception {
+ for (int i = 0; i < 10; ++i) {
+ CyclicBarrier raceStart = new CyclicBarrier(2);
+ TestServerCli cli = new TestServerCli() {
+ @Override
+ void syncPlugins(Terminal terminal, Environment env, ProcessInfo processInfo) throws Exception {
+ super.syncPlugins(terminal, env, processInfo);
+ raceStart.await();
+ }
+
+ @Override
+ public void close() throws IOException {
+ try {
+ raceStart.await();
+ } catch (InterruptedException ie) {
+ Thread.currentThread().interrupt();
+ throw new AssertionError(ie);
+ } catch (BrokenBarrierException e) {
+ throw new AssertionError(e);
+ }
+ super.close();
+ }
+ };
+ Thread closeThread = new Thread(() -> {
+ try {
+ cli.close();
+ } catch (IOException e) {
+ throw new AssertionError(e);
+ }
+ });
+ closeThread.start();
+ cli.main(new String[] {}, terminal, new ProcessInfo(sysprops, envVars, esHomeDir));
+ closeThread.join();
+
+ if (cli.getServer() == null) {
+ // close won the race, so server should never have been started
+ assertThat(cli.startServerCalled, is(false));
+ } else {
+ // creation won the race, so check we correctly waited on it and stopped
+ assertThat(cli.getServer(), sameInstance(mockServer));
+ assertThat(mockServer.waitForCalled, is(true));
+ assertThat(mockServer.stopCalled, is(true));
+ }
+ }
+ }
+
private MockSecureSettingsLoader loadWithMockSecureSettingsLoader() throws Exception {
var loader = new MockSecureSettingsLoader();
this.mockSecureSettingsLoader = loader;
@@ -465,9 +514,9 @@ public void execute(Terminal terminal, OptionSet options, Environment env, Proce
}
private class MockServerProcess extends ServerProcess {
- boolean detachCalled = false;
- boolean waitForCalled = false;
- boolean stopCalled = false;
+ volatile boolean detachCalled = false;
+ volatile boolean waitForCalled = false;
+ volatile boolean stopCalled = false;
MockServerProcess() {
super(null, null);
@@ -505,6 +554,8 @@ void reset() {
}
private class TestServerCli extends ServerCli {
+ boolean startServerCalled = false;
+
@Override
protected Command loadTool(String toolname, String libs) {
if (toolname.equals("auto-configure-node")) {
@@ -551,20 +602,21 @@ protected SecureSettingsLoader secureSettingsLoader(Environment env) {
return new KeystoreSecureSettingsLoader();
}
+
+ @Override
+ protected ServerProcess startServer(Terminal terminal, ProcessInfo processInfo, ServerArgs args) throws Exception {
+ startServerCalled = true;
+ if (argsValidator != null) {
+ argsValidator.accept(args);
+ }
+ mockServer.reset();
+ return mockServer;
+ }
}
@Override
protected Command newCommand() {
- return new TestServerCli() {
- @Override
- protected ServerProcess startServer(Terminal terminal, ProcessInfo processInfo, ServerArgs args) {
- if (argsValidator != null) {
- argsValidator.accept(args);
- }
- mockServer.reset();
- return mockServer;
- }
- };
+ return new TestServerCli();
}
static class MockSecureSettingsLoader implements SecureSettingsLoader {
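
`testProcessCreationRace` above uses a two-party `CyclicBarrier` to make the start/close race reproducible: both threads block at `raceStart.await()` and are released together, so each loop iteration actually exercises the interleaving instead of leaving it to scheduler luck. A standalone sketch of the idiom, with illustrative names and a stand-in for the close() side:

    import java.util.concurrent.CyclicBarrier;
    import java.util.concurrent.atomic.AtomicBoolean;

    public class BarrierRaceDemo {
        public static void main(String[] args) throws Exception {
            for (int i = 0; i < 10; ++i) {
                CyclicBarrier raceStart = new CyclicBarrier(2); // two parties: main and closer
                AtomicBoolean closeWon = new AtomicBoolean();
                Thread closer = new Thread(() -> {
                    try {
                        raceStart.await();  // released at the same instant as the main thread
                        closeWon.set(true); // stands in for close() flipping shuttingDown
                    } catch (Exception e) {
                        throw new AssertionError(e);
                    }
                });
                closer.start();
                raceStart.await(); // main thread reaches the racy point
                boolean observed = closeWon.get(); // may be true or false: a genuine race
                closer.join();
                System.out.println("iteration " + i + ": close won = " + observed);
            }
        }
    }

As in the test, the assertions afterwards must accept either outcome; the barrier guarantees contention, not a winner.
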
diff --git a/docs/changelog/101373.yaml b/docs/changelog/101373.yaml
deleted file mode 100644
index 53b5680301c79..0000000000000
--- a/docs/changelog/101373.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 101373
-summary: Adding aggregations support for the `_ignored` field
-area: Search
-type: feature
-issues:
- - 59946
diff --git a/docs/changelog/103374.yaml b/docs/changelog/103374.yaml
deleted file mode 100644
index fcdee9185eb92..0000000000000
--- a/docs/changelog/103374.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
-pr: 103374
-summary: Cut over stored fields to ZSTD for compression
-area: Search
-type: enhancement
-issues: []
-highlight:
- title: Stored fields are now compressed with ZStandard instead of LZ4/DEFLATE
- body: |-
- Stored fields are now compressed by splitting documents into blocks, which
- are then compressed independently with ZStandard. `index.codec: default`
- (default) uses blocks of at most 14kB or 128 documents compressed with level
- 0, while `index.codec: best_compression` uses blocks of at most 240kB or
- 2048 documents compressed at level 3. On most datasets that we tested
- against, this yielded storage improvements in the order of 10%, slightly
- faster indexing and similar retrieval latencies.
- notable: true
diff --git a/docs/changelog/105792.yaml b/docs/changelog/105792.yaml
deleted file mode 100644
index b9190e60cc96d..0000000000000
--- a/docs/changelog/105792.yaml
+++ /dev/null
@@ -1,18 +0,0 @@
-pr: 105792
-summary: "Change `skip_unavailable` remote cluster setting default value to true"
-area: Search
-type: breaking
-issues: []
-breaking:
- title: "Change `skip_unavailable` remote cluster setting default value to true"
- area: Cluster and node setting
- details: The default value of the `skip_unavailable` setting is now set to true.
- All existing and future remote clusters that do not define this setting will use the new default.
- This setting only affects cross-cluster searches using the _search or _async_search API.
- impact: Unavailable remote clusters in a cross-cluster search will no longer cause the search to fail unless
- skip_unavailable is configured to be `false` in elasticsearch.yml or via the `_cluster/settings` API.
- Unavailable clusters with `skip_unavailable`=`true` (either explicitly or by using the new default) are marked
- as SKIPPED in the search response metadata section and do not fail the entire search. If users want to ensure that a
- search returns a failure when a particular remote cluster is not available, `skip_unavailable` must now be
- set explicitly.
- notable: true
diff --git a/docs/changelog/105829.yaml b/docs/changelog/105829.yaml
deleted file mode 100644
index d9f8439e4b887..0000000000000
--- a/docs/changelog/105829.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 105829
-summary: Log shard movements
-area: Allocation
-type: enhancement
-issues: []
diff --git a/docs/changelog/106252.yaml b/docs/changelog/106252.yaml
deleted file mode 100644
index 5e3f084632b9d..0000000000000
--- a/docs/changelog/106252.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 106252
-summary: Add min/max range of the `event.ingested` field to cluster state for searchable
- snapshots
-area: Search
-type: enhancement
-issues: []
diff --git a/docs/changelog/106486.yaml b/docs/changelog/106486.yaml
deleted file mode 100644
index b33df50780e02..0000000000000
--- a/docs/changelog/106486.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
-pr: 106486
-summary: Create custom parser for ISO-8601 datetimes
-area: Infra/Core
-type: enhancement
-issues:
- - 102063
-highlight:
- title: New custom parser for ISO-8601 datetimes
- body: |-
- This introduces a new custom parser for ISO-8601 datetimes, for the `iso8601`, `strict_date_optional_time`, and
- `strict_date_optional_time_nanos` built-in date formats. This provides a performance improvement over the
- default Java date-time parsing. Whilst it maintains much of the same behaviour,
- the new parser does not accept nonsensical date-time strings that have multiple fractional seconds fields
- or multiple timezone specifiers. If the new parser fails to parse a string, it will then use the previous parser
- to parse it. If a large proportion of the input data consists of these invalid strings, this may cause
- a small performance degradation. If you wish to force the use of the old parsers regardless,
- set the JVM property `es.datetime.java_time_parsers=true` on all ES nodes.
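
The "nonsensical date-time strings" mentioned in the deleted highlight above are ones such as a timestamp carrying two fractional-second fields. A hedged illustration using plain `java.time` (not the Elasticsearch parser itself) of the accepted and rejected shapes:

    import java.time.Instant;
    import java.time.format.DateTimeParseException;

    public class Iso8601Demo {
        public static void main(String[] args) {
            // A well-formed ISO-8601 instant parses cleanly.
            System.out.println(Instant.parse("2024-05-01T12:30:00.123Z"));
            try {
                // Two fractional-second fields: rejected as nonsensical.
                Instant.parse("2024-05-01T12:30:00.123.456Z");
            } catch (DateTimeParseException e) {
                System.out.println("rejected: " + e.getMessage());
            }
        }
    }
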
diff --git a/docs/changelog/106553.yaml b/docs/changelog/106553.yaml
deleted file mode 100644
index 0ec5b1bb02da8..0000000000000
--- a/docs/changelog/106553.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 106553
-summary: Add support for hiragana_uppercase & katakana_uppercase token filters in kuromoji analysis plugin
-area: Search
-type: enhancement
-issues: []
diff --git a/docs/changelog/106591.yaml b/docs/changelog/106591.yaml
deleted file mode 100644
index 6a7814cb9cede..0000000000000
--- a/docs/changelog/106591.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 106591
-summary: Make dense vector field type updatable
-area: Search
-type: enhancement
-issues: []
diff --git a/docs/changelog/106820.yaml b/docs/changelog/106820.yaml
deleted file mode 100644
index d854e3984c13d..0000000000000
--- a/docs/changelog/106820.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 106820
-summary: Add a capabilities API to check node and cluster capabilities
-area: Infra/REST API
-type: feature
-issues: []
diff --git a/docs/changelog/107081.yaml b/docs/changelog/107081.yaml
deleted file mode 100644
index 2acd2f919b476..0000000000000
--- a/docs/changelog/107081.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 107081
-summary: Implement synthetic source support for range fields
-area: Mapping
-type: feature
-issues: []
diff --git a/docs/changelog/107088.yaml b/docs/changelog/107088.yaml
deleted file mode 100644
index 01a926f185eea..0000000000000
--- a/docs/changelog/107088.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 107088
-summary: Introduce role description field
-area: Authorization
-type: enhancement
-issues: []
diff --git a/docs/changelog/107191.yaml b/docs/changelog/107191.yaml
deleted file mode 100644
index 5ef6297c0f3f1..0000000000000
--- a/docs/changelog/107191.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
-pr: 107191
-summary: Stricter failure handling in multi-repo get-snapshots request handling
-area: Snapshot/Restore
-type: bug
-issues: []
-highlight:
- title: Stricter failure handling in multi-repo get-snapshots request handling
- body: |
- If a multi-repo get-snapshots request encounters a failure in one of the
- targeted repositories then earlier versions of Elasticsearch would proceed
- as if the faulty repository did not exist, except for a per-repository
- failure report in a separate section of the response body. This makes it
- impossible to paginate the results properly in the presence of failures. In
- versions 8.15.0 and later this API's failure handling behaviour has been
- made stricter, reporting an overall failure if any targeted repository's
- contents cannot be listed.
- notable: true
diff --git a/docs/changelog/107216.yaml b/docs/changelog/107216.yaml
deleted file mode 100644
index 7144eedf9bea4..0000000000000
--- a/docs/changelog/107216.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 107216
-summary: Add per-field KNN vector format to Index Segments API
-area: Search
-type: enhancement
-issues: []
diff --git a/docs/changelog/107240.yaml b/docs/changelog/107240.yaml
deleted file mode 100644
index baf4c222a9a27..0000000000000
--- a/docs/changelog/107240.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 107240
-summary: Include doc size info in ingest stats
-area: Ingest Node
-type: enhancement
-issues:
- - 106386
diff --git a/docs/changelog/107244.yaml b/docs/changelog/107244.yaml
deleted file mode 100644
index f805796674f93..0000000000000
--- a/docs/changelog/107244.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 107244
-summary: Support effective watermark thresholds in node stats API
-area: Allocation
-type: enhancement
-issues: [106676]
diff --git a/docs/changelog/107279.yaml b/docs/changelog/107279.yaml
deleted file mode 100644
index a2940ecc9ba2d..0000000000000
--- a/docs/changelog/107279.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 107279
-summary: Introduce _transform/_node_stats API
-area: Transform
-type: feature
-issues: []
diff --git a/docs/changelog/107409.yaml b/docs/changelog/107409.yaml
deleted file mode 100644
index 6f2350239772f..0000000000000
--- a/docs/changelog/107409.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 107409
-summary: "ESQL: Introduce a casting operator, `::`"
-area: ES|QL
-type: feature
-issues: []
diff --git a/docs/changelog/107410.yaml b/docs/changelog/107410.yaml
deleted file mode 100644
index 5026e88cfa762..0000000000000
--- a/docs/changelog/107410.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 107410
-summary: Cluster-state based Security role mapper
-area: Authorization
-type: enhancement
-issues: []
diff --git a/docs/changelog/107415.yaml b/docs/changelog/107415.yaml
deleted file mode 100644
index 8877d0426c60d..0000000000000
--- a/docs/changelog/107415.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 107415
-summary: Fix `DecayFunctions'` `toString`
-area: Search
-type: bug
-issues:
- - 100870
diff --git a/docs/changelog/107426.yaml b/docs/changelog/107426.yaml
deleted file mode 100644
index 2feed3df56108..0000000000000
--- a/docs/changelog/107426.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 107426
-summary: Support wait indefinitely for search tasks to complete on node shutdown
-area: Infra/Node Lifecycle
-type: bug
-issues: []
diff --git a/docs/changelog/107435.yaml b/docs/changelog/107435.yaml
deleted file mode 100644
index ae5d2215419c4..0000000000000
--- a/docs/changelog/107435.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 107435
-summary: '`NoSuchRemoteClusterException` should not be thrown when a remote is configured'
-area: Network
-type: bug
-issues:
- - 107381
diff --git a/docs/changelog/107493.yaml b/docs/changelog/107493.yaml
deleted file mode 100644
index dfd45e1493c95..0000000000000
--- a/docs/changelog/107493.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 107493
-summary: Remote cluster - API key security model - cluster privileges
-area: Security
-type: enhancement
-issues: []
diff --git a/docs/changelog/107545.yaml b/docs/changelog/107545.yaml
deleted file mode 100644
index ad457cc5a533f..0000000000000
--- a/docs/changelog/107545.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 107545
-summary: "ESQL: Union Types Support"
-area: ES|QL
-type: enhancement
-issues:
- - 100603
diff --git a/docs/changelog/107549.yaml b/docs/changelog/107549.yaml
deleted file mode 100644
index 36250cf65b4d9..0000000000000
--- a/docs/changelog/107549.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 107549
-summary: Add synthetic source support for binary fields
-area: Mapping
-type: feature
-issues: []
diff --git a/docs/changelog/107567.yaml b/docs/changelog/107567.yaml
deleted file mode 100644
index 558b5b570b1fb..0000000000000
--- a/docs/changelog/107567.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 107567
-summary: Add ignored field values to synthetic source
-area: Mapping
-type: enhancement
-issues: []
diff --git a/docs/changelog/107579.yaml b/docs/changelog/107579.yaml
deleted file mode 100644
index fdee59424b8de..0000000000000
--- a/docs/changelog/107579.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 107579
-summary: Adding `hits_time_in_millis` and `misses_time_in_millis` to enrich cache
- stats
-area: Ingest Node
-type: enhancement
-issues: []
diff --git a/docs/changelog/107593.yaml b/docs/changelog/107593.yaml
deleted file mode 100644
index 2e3d2cbc80119..0000000000000
--- a/docs/changelog/107593.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 107593
-summary: Add auto-sharding APM metrics
-area: Infra/Metrics
-type: enhancement
-issues: []
diff --git a/docs/changelog/107640.yaml b/docs/changelog/107640.yaml
deleted file mode 100644
index 9871943481f20..0000000000000
--- a/docs/changelog/107640.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 107640
-summary: "Unified Highlighter to support matched_fields "
-area: Highlighting
-type: enhancement
-issues:
- - 5172
diff --git a/docs/changelog/107645.yaml b/docs/changelog/107645.yaml
deleted file mode 100644
index 93fc0f2a89b3a..0000000000000
--- a/docs/changelog/107645.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
-pr: 107645
-summary: Add `_name` support for top level `knn` clauses
-area: Search
-type: enhancement
-issues:
- - 106254
- - 107448
diff --git a/docs/changelog/107647.yaml b/docs/changelog/107647.yaml
deleted file mode 100644
index 97d98a7c91079..0000000000000
--- a/docs/changelog/107647.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 107647
-summary: Adding human readable times to geoip stats
-area: Ingest Node
-type: enhancement
-issues: []
diff --git a/docs/changelog/107663.yaml b/docs/changelog/107663.yaml
deleted file mode 100644
index a7c3dc185425a..0000000000000
--- a/docs/changelog/107663.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 107663
-summary: Optimize `GeoBounds` and `GeoCentroid` aggregations for single value fields
-area: Geo
-type: enhancement
-issues: []
diff --git a/docs/changelog/107675.yaml b/docs/changelog/107675.yaml
deleted file mode 100644
index b1d51cd3f8538..0000000000000
--- a/docs/changelog/107675.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
-pr: 107675
-summary: Interpret `?timeout=-1` as infinite ack timeout
-area: Cluster Coordination
-type: breaking
-issues: []
-breaking:
- title: Interpret `?timeout=-1` as infinite ack timeout
- area: REST API
- details: |
- Today {es} accepts the parameter `?timeout=-1` in many APIs, but interprets
- this to mean the same as `?timeout=0`. From 8.15 onwards `?timeout=-1` will
- mean to wait indefinitely, aligning the behaviour of this parameter with
- other similar parameters such as `?master_timeout`.
- impact: |
- Use `?timeout=0` to force relevant operations to time out immediately
- instead of `?timeout=-1`
- notable: false
diff --git a/docs/changelog/107676.yaml b/docs/changelog/107676.yaml
deleted file mode 100644
index b14bc29e66efd..0000000000000
--- a/docs/changelog/107676.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 107676
-summary: Add model download progress to the download task status
-area: Machine Learning
-type: enhancement
-issues: []
diff --git a/docs/changelog/107706.yaml b/docs/changelog/107706.yaml
deleted file mode 100644
index 76b7f662bf0e0..0000000000000
--- a/docs/changelog/107706.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 107706
-summary: Add rate limiting support for the Inference API
-area: Machine Learning
-type: enhancement
-issues: []
diff --git a/docs/changelog/107735.yaml b/docs/changelog/107735.yaml
deleted file mode 100644
index 372cb59ba8b1f..0000000000000
--- a/docs/changelog/107735.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 107735
-summary: Implement synthetic source support for annotated text field
-area: Mapping
-type: feature
-issues: []
diff --git a/docs/changelog/107739.yaml b/docs/changelog/107739.yaml
deleted file mode 100644
index c55a0e332b4f6..0000000000000
--- a/docs/changelog/107739.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 107739
-summary: Binary field enables doc values by default for index mode with synthe…
-area: Mapping
-type: enhancement
-issues:
- - 107554
diff --git a/docs/changelog/107764.yaml b/docs/changelog/107764.yaml
deleted file mode 100644
index 3f83efc789014..0000000000000
--- a/docs/changelog/107764.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 107764
-summary: Increase size of big arrays only when there is an actual value in the aggregators
-area: Aggregations
-type: enhancement
-issues: []
diff --git a/docs/changelog/107779.yaml b/docs/changelog/107779.yaml
deleted file mode 100644
index a41c19a2329e0..0000000000000
--- a/docs/changelog/107779.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 107779
-summary: Allow rescorer with field collapsing
-area: Search
-type: enhancement
-issues:
- - 27243
\ No newline at end of file
diff --git a/docs/changelog/107792.yaml b/docs/changelog/107792.yaml
deleted file mode 100644
index bd9730d49d5d6..0000000000000
--- a/docs/changelog/107792.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 107792
-summary: Halt Indexer on Stop/Abort API
-area: Transform
-type: bug
-issues: []
diff --git a/docs/changelog/107813.yaml b/docs/changelog/107813.yaml
deleted file mode 100644
index 1cbb518a8be5b..0000000000000
--- a/docs/changelog/107813.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 107813
-summary: Increase size of big arrays only when there is an actual value in the aggregators
- (Analytics module)
-area: Aggregations
-type: enhancement
-issues: []
diff --git a/docs/changelog/107827.yaml b/docs/changelog/107827.yaml
deleted file mode 100644
index 7cf217567b745..0000000000000
--- a/docs/changelog/107827.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 107827
-summary: Add permission to secure access to certain config files
-area: Security
-type: bug
-issues: []
diff --git a/docs/changelog/107832.yaml b/docs/changelog/107832.yaml
deleted file mode 100644
index 491c491736005..0000000000000
--- a/docs/changelog/107832.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 107832
-summary: Optimise a few metric aggregations for single value fields
-area: Aggregations
-type: enhancement
-issues: []
diff --git a/docs/changelog/107862.yaml b/docs/changelog/107862.yaml
deleted file mode 100644
index 77f7a8c9fb02a..0000000000000
--- a/docs/changelog/107862.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 107862
-summary: Fix serialization of put-shutdown request
-area: Infra/Node Lifecycle
-type: bug
-issues:
- - 107857
diff --git a/docs/changelog/107876.yaml b/docs/changelog/107876.yaml
deleted file mode 100644
index 21624cacf7e1d..0000000000000
--- a/docs/changelog/107876.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 107876
-summary: "ESQL: Add aggregates node level reduction"
-area: ES|QL
-type: enhancement
-issues: []
diff --git a/docs/changelog/107877.yaml b/docs/changelog/107877.yaml
deleted file mode 100644
index cf458b3aa3a42..0000000000000
--- a/docs/changelog/107877.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 107877
-summary: Support metrics counter types in ESQL
-area: "ES|QL"
-type: enhancement
-issues: []
diff --git a/docs/changelog/107886.yaml b/docs/changelog/107886.yaml
deleted file mode 100644
index a328bc2a2a208..0000000000000
--- a/docs/changelog/107886.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 107886
-summary: Cluster state role mapper file settings service
-area: Authorization
-type: enhancement
-issues: []
diff --git a/docs/changelog/107892.yaml b/docs/changelog/107892.yaml
deleted file mode 100644
index 5fd5404c48d02..0000000000000
--- a/docs/changelog/107892.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 107892
-summary: Optimise cardinality aggregations for single value fields
-area: Aggregations
-type: enhancement
-issues: []
diff --git a/docs/changelog/107893.yaml b/docs/changelog/107893.yaml
deleted file mode 100644
index 61f0f4d76e679..0000000000000
--- a/docs/changelog/107893.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 107893
-summary: Optimise histogram aggregations for single value fields
-area: Aggregations
-type: enhancement
-issues: []
diff --git a/docs/changelog/107897.yaml b/docs/changelog/107897.yaml
deleted file mode 100644
index e4a2a5270475d..0000000000000
--- a/docs/changelog/107897.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 107897
-summary: Optimise composite aggregations for single value fields
-area: Aggregations
-type: enhancement
-issues: []
diff --git a/docs/changelog/107917.yaml b/docs/changelog/107917.yaml
deleted file mode 100644
index 18125bf46f4b7..0000000000000
--- a/docs/changelog/107917.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 107917
-summary: Exit gracefully when deleted
-area: Transform
-type: bug
-issues:
- - 107266
diff --git a/docs/changelog/107922.yaml b/docs/changelog/107922.yaml
deleted file mode 100644
index e28d0f6262af4..0000000000000
--- a/docs/changelog/107922.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 107922
-summary: Feature/annotated text store defaults
-area: Mapping
-type: enhancement
-issues:
- - 107734
diff --git a/docs/changelog/107930.yaml b/docs/changelog/107930.yaml
deleted file mode 100644
index 90af5c55b8604..0000000000000
--- a/docs/changelog/107930.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 107930
-summary: Optimise terms aggregations for single value fields
-area: Aggregations
-type: enhancement
-issues: []
diff --git a/docs/changelog/107937.yaml b/docs/changelog/107937.yaml
deleted file mode 100644
index 5938c8e8b6602..0000000000000
--- a/docs/changelog/107937.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 107937
-summary: Optimise multiterms aggregation for single value fields
-area: Aggregations
-type: enhancement
-issues: []
diff --git a/docs/changelog/107947.yaml b/docs/changelog/107947.yaml
deleted file mode 100644
index 637ac3c005779..0000000000000
--- a/docs/changelog/107947.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 107947
-summary: "ESQL: Fix equals `hashCode` for functions"
-area: ES|QL
-type: bug
-issues:
- - 104393
diff --git a/docs/changelog/107967.yaml b/docs/changelog/107967.yaml
deleted file mode 100644
index 159370e44f236..0000000000000
--- a/docs/changelog/107967.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 107967
-summary: Sort time series indices by time range in `GetDataStreams` API
-area: TSDB
-type: bug
-issues:
- - 102088
diff --git a/docs/changelog/107972.yaml b/docs/changelog/107972.yaml
deleted file mode 100644
index 3ec83d6a56954..0000000000000
--- a/docs/changelog/107972.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 107972
-summary: Require question to be non-null in `QuestionAnsweringConfig`
-area: Machine Learning
-type: bug
-issues: []
diff --git a/docs/changelog/107977.yaml b/docs/changelog/107977.yaml
deleted file mode 100644
index fdbbb57d7e48f..0000000000000
--- a/docs/changelog/107977.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 107977
-summary: Fix off by one error when handling null values in range fields
-area: Mapping
-type: bug
-issues:
- - 107282
diff --git a/docs/changelog/107978.yaml b/docs/changelog/107978.yaml
deleted file mode 100644
index 50115df9ee092..0000000000000
--- a/docs/changelog/107978.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 107978
-summary: Drop shards close timeout when stopping node.
-area: Engine
-type: enhancement
-issues:
- - 107938
diff --git a/docs/changelog/107987.yaml b/docs/changelog/107987.yaml
deleted file mode 100644
index e8afebde0b190..0000000000000
--- a/docs/changelog/107987.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 107987
-summary: "ESQL: Implement LOOKUP, an \"inline\" enrich"
-area: ES|QL
-type: enhancement
-issues:
- - 107306
diff --git a/docs/changelog/107990.yaml b/docs/changelog/107990.yaml
deleted file mode 100644
index 80cb96aca4426..0000000000000
--- a/docs/changelog/107990.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 107990
-summary: Optimise `time_series` aggregation for single value fields
-area: TSDB
-type: enhancement
-issues: []
diff --git a/docs/changelog/108016.yaml b/docs/changelog/108016.yaml
deleted file mode 100644
index 0aa3f86a6f859..0000000000000
--- a/docs/changelog/108016.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 108016
-summary: Optimise `BinaryRangeAggregator` for single value fields
-area: Aggregations
-type: enhancement
-issues: []
diff --git a/docs/changelog/108019.yaml b/docs/changelog/108019.yaml
deleted file mode 100644
index 69e8e9fd371f8..0000000000000
--- a/docs/changelog/108019.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 108019
-summary: Ignore additional cpu.stat fields
-area: Infra/Core
-type: bug
-issues:
- - 107983
diff --git a/docs/changelog/108051.yaml b/docs/changelog/108051.yaml
deleted file mode 100644
index a47e1192c6090..0000000000000
--- a/docs/changelog/108051.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 108051
-summary: Track synthetic source for disabled objects
-area: Mapping
-type: enhancement
-issues: []
diff --git a/docs/changelog/108065.yaml b/docs/changelog/108065.yaml
deleted file mode 100644
index 2ec93bf6e6295..0000000000000
--- a/docs/changelog/108065.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 108065
-summary: '`DenseVectorFieldMapper` fixed typo'
-area: Mapping
-type: bug
-issues: []
diff --git a/docs/changelog/108070.yaml b/docs/changelog/108070.yaml
deleted file mode 100644
index cde191aa50804..0000000000000
--- a/docs/changelog/108070.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 108070
-summary: Redirect `VersionConflict` to reset code
-area: Transform
-type: bug
-issues: []
diff --git a/docs/changelog/108088.yaml b/docs/changelog/108088.yaml
deleted file mode 100644
index 95c58f6dc19f1..0000000000000
--- a/docs/changelog/108088.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 108088
-summary: Add a SIMD (AVX2) optimised vector distance function for int7 on x64
-area: "Search"
-type: enhancement
-issues: []
diff --git a/docs/changelog/108089.yaml b/docs/changelog/108089.yaml
deleted file mode 100644
index 02fb6349185a6..0000000000000
--- a/docs/changelog/108089.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 108089
-summary: "ES|QL: limit query depth to 500 levels"
-area: ES|QL
-type: bug
-issues:
- - 107752
diff --git a/docs/changelog/108106.yaml b/docs/changelog/108106.yaml
deleted file mode 100644
index e9dd438e620c4..0000000000000
--- a/docs/changelog/108106.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 108106
-summary: Simulate should succeed if `ignore_missing_pipeline`
-area: Ingest Node
-type: bug
-issues:
- - 107314
diff --git a/docs/changelog/108118.yaml b/docs/changelog/108118.yaml
deleted file mode 100644
index b9b0f1c1406e0..0000000000000
--- a/docs/changelog/108118.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 108118
-summary: Optimize for single value in ordinals grouping
-area: ES|QL
-type: enhancement
-issues: []
diff --git a/docs/changelog/108122.yaml b/docs/changelog/108122.yaml
deleted file mode 100644
index 981ab39b9dad8..0000000000000
--- a/docs/changelog/108122.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 108122
-summary: Correct query profiling for conjunctions
-area: Search
-type: bug
-issues:
- - 108116
diff --git a/docs/changelog/108130.yaml b/docs/changelog/108130.yaml
deleted file mode 100644
index 5b431bdb0cc1b..0000000000000
--- a/docs/changelog/108130.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 108130
-summary: Optimise frequent item sets aggregation for single value fields
-area: Machine Learning
-type: enhancement
-issues: []
diff --git a/docs/changelog/108131.yaml b/docs/changelog/108131.yaml
deleted file mode 100644
index 7a4286c1e44a0..0000000000000
--- a/docs/changelog/108131.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 108131
-summary: "Inference Processor: skip inference when all fields are missing"
-area: Machine Learning
-type: enhancement
-issues: []
diff --git a/docs/changelog/108144.yaml b/docs/changelog/108144.yaml
deleted file mode 100644
index 6ff5b1d600d0e..0000000000000
--- a/docs/changelog/108144.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 108144
-summary: Bump Tika dependency to 2.9.2
-area: Ingest Node
-type: upgrade
-issues: []
diff --git a/docs/changelog/108145.yaml b/docs/changelog/108145.yaml
deleted file mode 100644
index b8c9428c1e3a8..0000000000000
--- a/docs/changelog/108145.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 108145
-summary: Async close of `IndexShard`
-area: Engine
-type: bug
-issues: []
diff --git a/docs/changelog/108146.yaml b/docs/changelog/108146.yaml
deleted file mode 100644
index 2a4f917134090..0000000000000
--- a/docs/changelog/108146.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 108146
-summary: Allow deletion of the ELSER inference service when referenced in ingest
-area: Machine Learning
-type: bug
-issues: []
diff --git a/docs/changelog/108155.yaml b/docs/changelog/108155.yaml
deleted file mode 100644
index 57db86b4005b9..0000000000000
--- a/docs/changelog/108155.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 108155
-summary: Upgrade to Netty 4.1.109
-area: Network
-type: upgrade
-issues: []
diff --git a/docs/changelog/108161.yaml b/docs/changelog/108161.yaml
deleted file mode 100644
index 73fa41e2089d3..0000000000000
--- a/docs/changelog/108161.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 108161
-summary: Refactor TextEmbeddingResults to use primitives rather than objects
-area: Machine Learning
-type: bug
-issues: []
diff --git a/docs/changelog/108165.yaml b/docs/changelog/108165.yaml
deleted file mode 100644
index b88b0f5e217dd..0000000000000
--- a/docs/changelog/108165.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 108165
-summary: Add `BlockHash` for 3 `BytesRefs`
-area: ES|QL
-type: enhancement
-issues: []
diff --git a/docs/changelog/108171.yaml b/docs/changelog/108171.yaml
deleted file mode 100644
index 1ec17bb3e411d..0000000000000
--- a/docs/changelog/108171.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 108171
-summary: "add Elastic-internal stable bridge api for use by Logstash"
-area: Infra/Core
-type: enhancement
-issues: []
diff --git a/docs/changelog/108222.yaml b/docs/changelog/108222.yaml
deleted file mode 100644
index 701b853441e32..0000000000000
--- a/docs/changelog/108222.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 108222
-summary: Add generic fallback implementation for synthetic source
-area: Mapping
-type: feature
-issues: []
diff --git a/docs/changelog/108223.yaml b/docs/changelog/108223.yaml
deleted file mode 100644
index ba8756a8f9c68..0000000000000
--- a/docs/changelog/108223.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 108223
-summary: Upgrade bouncy castle (non-fips) to 1.78.1
-area: Security
-type: upgrade
-issues: []
diff --git a/docs/changelog/108227.yaml b/docs/changelog/108227.yaml
deleted file mode 100644
index 79f69bc4aaff6..0000000000000
--- a/docs/changelog/108227.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 108227
-summary: "Apm-data: improve indexing resilience"
-area: Data streams
-type: enhancement
-issues: []
diff --git a/docs/changelog/108254.yaml b/docs/changelog/108254.yaml
deleted file mode 100644
index 3bf08e8b8f5fc..0000000000000
--- a/docs/changelog/108254.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 108254
-summary: Add `sparse_vector` query
-area: Machine Learning
-type: enhancement
-issues: []
diff --git a/docs/changelog/108266.yaml b/docs/changelog/108266.yaml
deleted file mode 100644
index 5a189cfcdc258..0000000000000
--- a/docs/changelog/108266.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 108266
-summary: Log details of non-green indicators in `HealthPeriodicLogger`
-area: Health
-type: enhancement
-issues: []
diff --git a/docs/changelog/108300.yaml b/docs/changelog/108300.yaml
deleted file mode 100644
index c4d6e468113a4..0000000000000
--- a/docs/changelog/108300.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 108300
-summary: "ESQL: Add more time span units"
-area: ES|QL
-type: enhancement
-issues: []
diff --git a/docs/changelog/108306.yaml b/docs/changelog/108306.yaml
deleted file mode 100644
index 7a104ce880f43..0000000000000
--- a/docs/changelog/108306.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 108306
-summary: Enable inter-segment concurrency for low cardinality numeric terms aggs
-area: Aggregations
-type: enhancement
-issues: []
diff --git a/docs/changelog/108333.yaml b/docs/changelog/108333.yaml
deleted file mode 100644
index c3152500ce1b2..0000000000000
--- a/docs/changelog/108333.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 108333
-summary: Allow `read_slm` to call GET /_slm/status
-area: ILM+SLM
-type: bug
-issues: []
diff --git a/docs/changelog/108340.yaml b/docs/changelog/108340.yaml
deleted file mode 100644
index fb2ea72c0a0f5..0000000000000
--- a/docs/changelog/108340.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 108340
-summary: "Apm-data: increase version for templates"
-area: Data streams
-type: enhancement
-issues: []
diff --git a/docs/changelog/108349.yaml b/docs/changelog/108349.yaml
deleted file mode 100644
index 6d9ea3d658dca..0000000000000
--- a/docs/changelog/108349.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 108349
-summary: "Ecs@mappings: reduce scope for `ecs_geo_point`"
-area: Data streams
-type: bug
-issues:
- - 108338
diff --git a/docs/changelog/108379.yaml b/docs/changelog/108379.yaml
deleted file mode 100644
index 312856a5db33d..0000000000000
--- a/docs/changelog/108379.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 108379
-summary: Create a new `NodeRequest` for every `NodesDataTiersUsageTransport` use
-area: Indices APIs
-type: bug
-issues: []
diff --git a/docs/changelog/108394.yaml b/docs/changelog/108394.yaml
deleted file mode 100644
index 58f48fa548c6e..0000000000000
--- a/docs/changelog/108394.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 108394
-summary: Handle `IndexNotFoundException`
-area: Transform
-type: bug
-issues:
- - 107263
diff --git a/docs/changelog/108395.yaml b/docs/changelog/108395.yaml
deleted file mode 100644
index c33cf169a99fa..0000000000000
--- a/docs/changelog/108395.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 108395
-summary: "ESQL: change from quoting from backtick to quote"
-area: ES|QL
-type: bug
-issues: []
diff --git a/docs/changelog/108396.yaml b/docs/changelog/108396.yaml
deleted file mode 100644
index 63937646b755c..0000000000000
--- a/docs/changelog/108396.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 108396
-summary: "Apm-data: improve default pipeline performance"
-area: Data streams
-type: enhancement
-issues:
- - 108290
diff --git a/docs/changelog/108409.yaml b/docs/changelog/108409.yaml
deleted file mode 100644
index 6cff86cf93930..0000000000000
--- a/docs/changelog/108409.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 108409
-summary: Support multiple associated groups for TopN
-area: Application
-type: enhancement
-issues:
- - 108018
diff --git a/docs/changelog/108410.yaml b/docs/changelog/108410.yaml
deleted file mode 100644
index 5fd831231a3be..0000000000000
--- a/docs/changelog/108410.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 108410
-summary: GeoIP tasks should wait longer for master
-area: Ingest Node
-type: bug
-issues: []
diff --git a/docs/changelog/108417.yaml b/docs/changelog/108417.yaml
deleted file mode 100644
index bb650922f1be5..0000000000000
--- a/docs/changelog/108417.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 108417
-summary: Track source for arrays of objects
-area: Mapping
-type: enhancement
-issues:
- - 90708
diff --git a/docs/changelog/108421.yaml b/docs/changelog/108421.yaml
deleted file mode 100644
index 1f077a4a2cb7c..0000000000000
--- a/docs/changelog/108421.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 108421
-summary: "[ES|QL] Support Named and Positional Parameters in `EsqlQueryRequest`"
-area: ES|QL
-type: enhancement
-issues:
- - 107029
diff --git a/docs/changelog/108429.yaml b/docs/changelog/108429.yaml
deleted file mode 100644
index 562454a0de256..0000000000000
--- a/docs/changelog/108429.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 108429
-summary: Fix `ClassCastException` in Significant Terms
-area: Aggregations
-type: bug
-issues:
- - 108427
diff --git a/docs/changelog/108444.yaml b/docs/changelog/108444.yaml
deleted file mode 100644
index c946ab24f939a..0000000000000
--- a/docs/changelog/108444.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 108444
-summary: "Apm-data: ignore malformed fields, and too many dynamic fields"
-area: Data streams
-type: enhancement
-issues: []
diff --git a/docs/changelog/108452.yaml b/docs/changelog/108452.yaml
deleted file mode 100644
index fdf531602c806..0000000000000
--- a/docs/changelog/108452.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 108452
-summary: Add the rerank task to the Elasticsearch internal inference service
-area: Machine Learning
-type: enhancement
-issues: []
diff --git a/docs/changelog/108455.yaml b/docs/changelog/108455.yaml
deleted file mode 100644
index 8397af7b07cf1..0000000000000
--- a/docs/changelog/108455.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 108455
-summary: "[ES|QL] Convert string to datetime when the other size of an arithmetic\
- \ operator is `date_period` or `time_duration`"
-area: ES|QL
-type: enhancement
-issues: []
diff --git a/docs/changelog/108459.yaml b/docs/changelog/108459.yaml
deleted file mode 100644
index 5e05797f284be..0000000000000
--- a/docs/changelog/108459.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 108459
-summary: Do not use global ordinals strategy if the leaf reader context cannot be
- obtained
-area: Machine Learning
-type: bug
-issues: []
diff --git a/docs/changelog/108472.yaml b/docs/changelog/108472.yaml
deleted file mode 100644
index 82481e4edec3a..0000000000000
--- a/docs/changelog/108472.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 108472
-summary: Add support for Azure AI Studio embeddings and completions to the inference service.
-area: Machine Learning
-type: feature
-issues: []
diff --git a/docs/changelog/108517.yaml b/docs/changelog/108517.yaml
deleted file mode 100644
index 359c8302fdf6c..0000000000000
--- a/docs/changelog/108517.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 108517
-summary: Forward `indexServiceSafe` exception to listener
-area: Transform
-type: bug
-issues:
- - 108418
diff --git a/docs/changelog/108521.yaml b/docs/changelog/108521.yaml
deleted file mode 100644
index adc7c11a4decd..0000000000000
--- a/docs/changelog/108521.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 108521
-summary: Adding override for lintian false positive on `libvec.so`
-area: "Packaging"
-type: bug
-issues:
- - 108514
diff --git a/docs/changelog/108522.yaml b/docs/changelog/108522.yaml
deleted file mode 100644
index 5bc064d7995e9..0000000000000
--- a/docs/changelog/108522.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 108522
-summary: Ensure we return non-negative scores when scoring scalar dot-products
-area: Vector Search
-type: bug
-issues: []
diff --git a/docs/changelog/108537.yaml b/docs/changelog/108537.yaml
deleted file mode 100644
index 1c0228a71d449..0000000000000
--- a/docs/changelog/108537.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 108537
-summary: Limit the value in prefix query
-area: Search
-type: enhancement
-issues:
- - 108486
diff --git a/docs/changelog/108538.yaml b/docs/changelog/108538.yaml
deleted file mode 100644
index 10ae49f0c1670..0000000000000
--- a/docs/changelog/108538.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 108538
-summary: Adding RankFeature search phase implementation
-area: Search
-type: feature
-issues: []
diff --git a/docs/changelog/108574.yaml b/docs/changelog/108574.yaml
deleted file mode 100644
index b3c957721e01e..0000000000000
--- a/docs/changelog/108574.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 108574
-summary: "[ESQL] CBRT function"
-area: ES|QL
-type: enhancement
-issues: []
diff --git a/docs/changelog/108602.yaml b/docs/changelog/108602.yaml
deleted file mode 100644
index d544c89980123..0000000000000
--- a/docs/changelog/108602.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 108602
-summary: "[Inference API] Extract optional long instead of integer in `RateLimitSettings#of`"
-area: Machine Learning
-type: bug
-issues: []
diff --git a/docs/changelog/108606.yaml b/docs/changelog/108606.yaml
deleted file mode 100644
index 04780bff58800..0000000000000
--- a/docs/changelog/108606.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-pr: 108606
-summary: "Extend ISO8601 datetime parser to specify forbidden fields, allowing it to be used\
- \ on more formats"
-area: Infra/Core
-type: enhancement
-issues: []
-highlight:
- title: New custom parser for more ISO-8601 date formats
- body: |-
- Following on from #106486, this extends the custom ISO-8601 datetime parser to cover the `strict_year`,
- `strict_year_month`, `strict_date_time`, `strict_date_time_no_millis`, `strict_date_hour_minute_second`,
- `strict_date_hour_minute_second_millis`, and `strict_date_hour_minute_second_fraction` date formats.
- As before, the parser will use the existing java.time parser if there are parsing issues, and the
- `es.datetime.java_time_parsers=true` JVM property will force the use of the old parsers regardless.
diff --git a/docs/changelog/108607.yaml b/docs/changelog/108607.yaml
deleted file mode 100644
index 9ad4cf91e67b9..0000000000000
--- a/docs/changelog/108607.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 108607
-summary: Specify parse index when error occurs on multiple datetime parses
-area: Infra/Core
-type: bug
-issues: []
diff --git a/docs/changelog/108612.yaml b/docs/changelog/108612.yaml
deleted file mode 100644
index 7a3dfa2b7ba44..0000000000000
--- a/docs/changelog/108612.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 108612
-summary: "[Connector API] Change `UpdateConnectorFiltering` API to have better defaults"
-area: Application
-type: enhancement
-issues: []
diff --git a/docs/changelog/108624.yaml b/docs/changelog/108624.yaml
deleted file mode 100644
index 0da1fd2902c03..0000000000000
--- a/docs/changelog/108624.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
-pr: 108624
-summary: Disallow new rollup jobs in clusters with no rollup usage
-area: Rollup
-type: breaking
-issues:
- - 108381
-breaking:
- title: Disallow new rollup jobs in clusters with no rollup usage
- area: Rollup
- details: The put rollup API will fail with an error when a rollup job is created in a cluster with no rollup usage.
- impact: Clusters with no rollup usage (either no rollup job or index) cannot create new rollup jobs.
- notable: true
diff --git a/docs/changelog/108639.yaml b/docs/changelog/108639.yaml
deleted file mode 100644
index e4964cbeb0285..0000000000000
--- a/docs/changelog/108639.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 108639
-summary: Add support for the 'Domain' database to the geoip processor
-area: Ingest Node
-type: enhancement
-issues: []
diff --git a/docs/changelog/108643.yaml b/docs/changelog/108643.yaml
deleted file mode 100644
index f71a943673326..0000000000000
--- a/docs/changelog/108643.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 108643
-summary: Use `scheduleUnlessShuttingDown` in `LeaderChecker`
-area: Cluster Coordination
-type: bug
-issues:
- - 108642
diff --git a/docs/changelog/108651.yaml b/docs/changelog/108651.yaml
deleted file mode 100644
index 227c464909d50..0000000000000
--- a/docs/changelog/108651.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 108651
-summary: Add support for the 'ISP' database to the geoip processor
-area: Ingest Node
-type: enhancement
-issues: []
diff --git a/docs/changelog/108672.yaml b/docs/changelog/108672.yaml
deleted file mode 100644
index e1261fcf6f232..0000000000000
--- a/docs/changelog/108672.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 108672
-summary: Add bounds checking to parsing ISO8601 timezone offset values
-area: Infra/Core
-type: bug
-issues: []
diff --git a/docs/changelog/108679.yaml b/docs/changelog/108679.yaml
deleted file mode 100644
index 62cd82a52c5bb..0000000000000
--- a/docs/changelog/108679.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 108679
-summary: Suppress deprecation warnings from ingest pipelines when deleting trained model
-area: Machine Learning
-type: bug
-issues:
- - 105004
diff --git a/docs/changelog/108682.yaml b/docs/changelog/108682.yaml
deleted file mode 100644
index bd566acab8306..0000000000000
--- a/docs/changelog/108682.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 108682
-summary: Adding support for explain in rrf
-area: Search
-type: enhancement
-issues: []
diff --git a/docs/changelog/108683.yaml b/docs/changelog/108683.yaml
deleted file mode 100644
index b9e7df5fefc18..0000000000000
--- a/docs/changelog/108683.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-pr: 108683
-summary: Add support for the 'Connection Type' database to the geoip processor
-area: Ingest Node
-type: enhancement
-issues: []
-highlight:
- title: "Preview: Support for the 'Connection Type, 'Domain', and 'ISP' databases in the geoip processor"
- body: |-
- As a Technical Preview, the {ref}/geoip-processor.html[`geoip`] processor can now use the commercial
- https://dev.maxmind.com/geoip/docs/databases/connection-type[GeoIP2 'Connection Type'],
- https://dev.maxmind.com/geoip/docs/databases/domain[GeoIP2 'Domain'],
- and
- https://dev.maxmind.com/geoip/docs/databases/isp[GeoIP2 'ISP']
- databases from MaxMind.
diff --git a/docs/changelog/108684.yaml b/docs/changelog/108684.yaml
deleted file mode 100644
index 91684d2998be6..0000000000000
--- a/docs/changelog/108684.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 108684
-summary: Check if `CsvTests` required capabilities exist
-area: ES|QL
-type: enhancement
-issues: []
diff --git a/docs/changelog/108687.yaml b/docs/changelog/108687.yaml
deleted file mode 100644
index 771516d551567..0000000000000
--- a/docs/changelog/108687.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 108687
-summary: Adding `user_type` support for the enterprise database for the geoip processor
-area: Ingest Node
-type: enhancement
-issues: []
diff --git a/docs/changelog/108693.yaml b/docs/changelog/108693.yaml
deleted file mode 100644
index ee701e0f57736..0000000000000
--- a/docs/changelog/108693.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 108693
-summary: Test pipeline run after reroute
-area: Ingest Node
-type: enhancement
-issues: []
diff --git a/docs/changelog/108705.yaml b/docs/changelog/108705.yaml
deleted file mode 100644
index fd08734831018..0000000000000
--- a/docs/changelog/108705.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 108705
-summary: Associate restore snapshot task to parent mount task
-area: Distributed
-type: bug
-issues:
- - 105830
diff --git a/docs/changelog/108713.yaml b/docs/changelog/108713.yaml
deleted file mode 100644
index d6b1ddabd6c1e..0000000000000
--- a/docs/changelog/108713.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 108713
-summary: Rewrite away type converting functions that do not convert types
-area: ES|QL
-type: enhancement
-issues:
- - 107716
diff --git a/docs/changelog/108726.yaml b/docs/changelog/108726.yaml
deleted file mode 100644
index 2e800a45e6975..0000000000000
--- a/docs/changelog/108726.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 108726
-summary: Allow RA metrics to be reported upon parsing completed or accumulated
-area: Infra/Metrics
-type: enhancement
-issues: []
diff --git a/docs/changelog/108733.yaml b/docs/changelog/108733.yaml
deleted file mode 100644
index 76a969219ea4c..0000000000000
--- a/docs/changelog/108733.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 108733
-summary: Query Roles API
-area: Security
-type: feature
-issues: []
diff --git a/docs/changelog/108746.yaml b/docs/changelog/108746.yaml
deleted file mode 100644
index 93ed917f3b56e..0000000000000
--- a/docs/changelog/108746.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 108746
-summary: Support synthetic source for `aggregate_metric_double` when ignore_malf…
-area: Mapping
-type: feature
-issues: []
diff --git a/docs/changelog/108759.yaml b/docs/changelog/108759.yaml
deleted file mode 100644
index dfc2b30fe6c57..0000000000000
--- a/docs/changelog/108759.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 108759
-summary: Expose `?master_timeout` in autoscaling APIs
-area: Autoscaling
-type: bug
-issues: []
diff --git a/docs/changelog/108761.yaml b/docs/changelog/108761.yaml
deleted file mode 100644
index 92aa67ebe0bfe..0000000000000
--- a/docs/changelog/108761.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 108761
-summary: Add some missing timeout params to REST API specs
-area: Infra/REST API
-type: bug
-issues: []
diff --git a/docs/changelog/108764.yaml b/docs/changelog/108764.yaml
deleted file mode 100644
index 94de27eb52c9b..0000000000000
--- a/docs/changelog/108764.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 108764
-summary: ST_DISTANCE Function
-area: ES|QL
-type: enhancement
-issues:
- - 108212
diff --git a/docs/changelog/108780.yaml b/docs/changelog/108780.yaml
deleted file mode 100644
index 40e66326e6b9b..0000000000000
--- a/docs/changelog/108780.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 108780
-summary: Add `continent_code` support to the geoip processor
-area: Ingest Node
-type: enhancement
-issues:
- - 85820
diff --git a/docs/changelog/108786.yaml b/docs/changelog/108786.yaml
deleted file mode 100644
index 1c07a3ceac900..0000000000000
--- a/docs/changelog/108786.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 108786
-summary: Make ingest byte stat names more descriptive
-area: Ingest Node
-type: enhancement
-issues: []
diff --git a/docs/changelog/108793.yaml b/docs/changelog/108793.yaml
deleted file mode 100644
index 87668c8ee009b..0000000000000
--- a/docs/changelog/108793.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 108793
-summary: Add `SparseVectorStats`
-area: Search
-type: enhancement
-issues: []
diff --git a/docs/changelog/108796.yaml b/docs/changelog/108796.yaml
deleted file mode 100644
index 808247cf347d9..0000000000000
--- a/docs/changelog/108796.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 108796
-summary: Return ingest byte stats even when 0-valued
-area: Ingest Node
-type: enhancement
-issues: []
diff --git a/docs/changelog/108814.yaml b/docs/changelog/108814.yaml
deleted file mode 100644
index 94298838c372e..0000000000000
--- a/docs/changelog/108814.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 108814
-summary: Deserialize publish requests on generic thread-pool
-area: Cluster Coordination
-type: bug
-issues:
- - 106352
diff --git a/docs/changelog/108818.yaml b/docs/changelog/108818.yaml
deleted file mode 100644
index ed60fb5f64abd..0000000000000
--- a/docs/changelog/108818.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 108818
-summary: Store source for nested objects
-area: Mapping
-type: enhancement
-issues: []
diff --git a/docs/changelog/108820.yaml b/docs/changelog/108820.yaml
deleted file mode 100644
index 55045ffce3dfa..0000000000000
--- a/docs/changelog/108820.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 108820
-summary: Allow `LuceneSourceOperator` to early terminate
-area: ES|QL
-type: enhancement
-issues: []
diff --git a/docs/changelog/108822.yaml b/docs/changelog/108822.yaml
deleted file mode 100644
index 8cec4da5dbc7f..0000000000000
--- a/docs/changelog/108822.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 108822
-summary: Update ASM to 9.7 for plugin scanner
-area: Infra/Plugins
-type: upgrade
-issues:
- - 108776
diff --git a/docs/changelog/108831.yaml b/docs/changelog/108831.yaml
deleted file mode 100644
index 496bc0108f9d2..0000000000000
--- a/docs/changelog/108831.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 108831
-summary: Rename rule query and add support for multiple rulesets
-area: Application
-type: enhancement
-issues: [ ]
diff --git a/docs/changelog/108849.yaml b/docs/changelog/108849.yaml
deleted file mode 100644
index 7c503efe9187b..0000000000000
--- a/docs/changelog/108849.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 108849
-summary: "[Osquery] Extend `kibana_system` role with an access to new `osquery_manager`\
- \ index"
-area: Authorization
-type: enhancement
-issues: []
diff --git a/docs/changelog/108856.yaml b/docs/changelog/108856.yaml
deleted file mode 100644
index 9b8f42248a442..0000000000000
--- a/docs/changelog/108856.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 108856
-summary: Return noop instance `DocSizeObserver` for updates with scripts
-area: Infra/Metrics
-type: enhancement
-issues: []
diff --git a/docs/changelog/108860.yaml b/docs/changelog/108860.yaml
deleted file mode 100644
index 93aa8ce7c08ff..0000000000000
--- a/docs/changelog/108860.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 108860
-summary: "Apm-data: enable plugin by default"
-area: Data streams
-type: enhancement
-issues: []
diff --git a/docs/changelog/108862.yaml b/docs/changelog/108862.yaml
deleted file mode 100644
index ddba15f11e8f5..0000000000000
--- a/docs/changelog/108862.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 108862
-summary: "Apm-data: set codec: best_compression for logs-apm.* data streams"
-area: Data streams
-type: enhancement
-issues: []
diff --git a/docs/changelog/108868.yaml b/docs/changelog/108868.yaml
deleted file mode 100644
index d0643f056cce8..0000000000000
--- a/docs/changelog/108868.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 108868
-summary: GA the update trained model action
-area: Machine Learning
-type: enhancement
-issues: []
diff --git a/docs/changelog/108870.yaml b/docs/changelog/108870.yaml
deleted file mode 100644
index 435eea9845f16..0000000000000
--- a/docs/changelog/108870.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 108870
-summary: Adding score from `RankDoc` to `SearchHit`
-area: Search
-type: bug
-issues: []
diff --git a/docs/changelog/108871.yaml b/docs/changelog/108871.yaml
deleted file mode 100644
index 46bf8ca9d8404..0000000000000
--- a/docs/changelog/108871.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 108871
-summary: "Reapply \"ESQL: Expose \"_ignored\" metadata field\""
-area: ES|QL
-type: feature
-issues: []
diff --git a/docs/changelog/108878.yaml b/docs/changelog/108878.yaml
deleted file mode 100644
index 1a8127869a647..0000000000000
--- a/docs/changelog/108878.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 108878
-summary: Support arrays in fallback synthetic source implementation
-area: Mapping
-type: feature
-issues: []
diff --git a/docs/changelog/108881.yaml b/docs/changelog/108881.yaml
deleted file mode 100644
index b6de1129cfa03..0000000000000
--- a/docs/changelog/108881.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 108881
-summary: Add synthetic source support for `geo_shape` via fallback implementation
-area: Mapping
-type: feature
-issues: []
diff --git a/docs/changelog/108885.yaml b/docs/changelog/108885.yaml
deleted file mode 100644
index c66843e082e29..0000000000000
--- a/docs/changelog/108885.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 108885
-summary: "Apm-data: increase priority above Fleet templates"
-area: Data streams
-type: enhancement
-issues: []
diff --git a/docs/changelog/108886.yaml b/docs/changelog/108886.yaml
deleted file mode 100644
index 18df59e577713..0000000000000
--- a/docs/changelog/108886.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 108886
-summary: Expose `?master_timeout` on get-shutdown API
-area: Infra/Node Lifecycle
-type: bug
-issues: []
diff --git a/docs/changelog/108891.yaml b/docs/changelog/108891.yaml
deleted file mode 100644
index 8282b616b34a9..0000000000000
--- a/docs/changelog/108891.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 108891
-summary: Fix NPE during destination index creation
-area: Transform
-type: bug
-issues:
- - 108890
diff --git a/docs/changelog/108895.yaml b/docs/changelog/108895.yaml
deleted file mode 100644
index 15293896b20c5..0000000000000
--- a/docs/changelog/108895.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 108895
-summary: Add permission to secure access to certain config files specified by settings
-area: "Security"
-type: bug
-issues: []
diff --git a/docs/changelog/108896.yaml b/docs/changelog/108896.yaml
deleted file mode 100644
index c52f074b65605..0000000000000
--- a/docs/changelog/108896.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 108896
-summary: Introduce `logs` index mode as Tech Preview
-area: Logs
-type: feature
-issues:
- - 108896
diff --git a/docs/changelog/108911.yaml b/docs/changelog/108911.yaml
deleted file mode 100644
index 8832e01f7426e..0000000000000
--- a/docs/changelog/108911.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 108911
-summary: Store source for fields in objects with `dynamic` override
-area: Mapping
-type: enhancement
-issues: []
diff --git a/docs/changelog/108942.yaml b/docs/changelog/108942.yaml
deleted file mode 100644
index c58b06a92cee8..0000000000000
--- a/docs/changelog/108942.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 108942
-summary: Fix NPE in trained model assignment updater
-area: Machine Learning
-type: bug
-issues: []
diff --git a/docs/changelog/108947.yaml b/docs/changelog/108947.yaml
deleted file mode 100644
index 8aa4293242985..0000000000000
--- a/docs/changelog/108947.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 108947
-summary: Provide the `DocumentSizeReporter` with index mode
-area: Infra/Metrics
-type: enhancement
-issues: []
diff --git a/docs/changelog/108999.yaml b/docs/changelog/108999.yaml
deleted file mode 100644
index 089d765b4e2d0..0000000000000
--- a/docs/changelog/108999.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 108999
-summary: Use default translog durability on AD results index
-area: Machine Learning
-type: bug
-issues: []
diff --git a/docs/changelog/109007.yaml b/docs/changelog/109007.yaml
deleted file mode 100644
index c828db64220fb..0000000000000
--- a/docs/changelog/109007.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 109007
-summary: Multivalue Sparse Vector Support
-area: Search
-type: enhancement
-issues: []
diff --git a/docs/changelog/109017.yaml b/docs/changelog/109017.yaml
new file mode 100644
index 0000000000000..80bcdd6fc0e25
--- /dev/null
+++ b/docs/changelog/109017.yaml
@@ -0,0 +1,6 @@
+pr: 109017
+summary: "ESQL: Add `MV_PSERIES_WEIGHTED_SUM` for score calculations used by security\
+ \ solution"
+area: ES|QL
+type: "feature"
+issues: [ ]
diff --git a/docs/changelog/109025.yaml b/docs/changelog/109025.yaml
deleted file mode 100644
index 38d19cab13d30..0000000000000
--- a/docs/changelog/109025.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 109025
-summary: Introduce a setting controlling the activation of the `logs` index mode in logs@settings
-area: Logs
-type: feature
-issues:
- - 108762
diff --git a/docs/changelog/109042.yaml b/docs/changelog/109042.yaml
deleted file mode 100644
index 5aa80db991c0d..0000000000000
--- a/docs/changelog/109042.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 109042
-summary: Add Create or update query rule API call
-area: Application
-type: enhancement
-issues: [ ]
diff --git a/docs/changelog/109043.yaml b/docs/changelog/109043.yaml
deleted file mode 100644
index bdfe3addea8e9..0000000000000
--- a/docs/changelog/109043.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 109043
-summary: "Apm-data: set concrete values for `metricset.interval`"
-area: Data streams
-type: bug
-issues: []
diff --git a/docs/changelog/109044.yaml b/docs/changelog/109044.yaml
deleted file mode 100644
index 9e50c377606a0..0000000000000
--- a/docs/changelog/109044.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 109044
-summary: Enable fallback synthetic source for `token_count`
-area: Mapping
-type: feature
-issues: []
diff --git a/docs/changelog/109047.yaml b/docs/changelog/109047.yaml
deleted file mode 100644
index 85a8808353a08..0000000000000
--- a/docs/changelog/109047.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 109047
-summary: Prevent concurrent jobs during cleanup
-area: Transform
-type: bug
-issues: []
diff --git a/docs/changelog/109070.yaml b/docs/changelog/109070.yaml
deleted file mode 100644
index 8dbc0ec1c6cf2..0000000000000
--- a/docs/changelog/109070.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 109070
-summary: "ESQL: Add `ip_prefix` function"
-area: ES|QL
-type: feature
-issues:
- - 99064
diff --git a/docs/changelog/109071.yaml b/docs/changelog/109071.yaml
deleted file mode 100644
index 275a5433cc1d8..0000000000000
--- a/docs/changelog/109071.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 109071
-summary: Better handling of multiple rescorers clauses with LTR
-area: "Search"
-type: bug
-issues: []
diff --git a/docs/changelog/109078.yaml b/docs/changelog/109078.yaml
deleted file mode 100644
index f602ee9b131bc..0000000000000
--- a/docs/changelog/109078.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 109078
-summary: Expose API Key cache metrics
-area: Authentication
-type: enhancement
-issues: []
diff --git a/docs/changelog/109084.yaml b/docs/changelog/109084.yaml
deleted file mode 100644
index 67ff5610c5a66..0000000000000
--- a/docs/changelog/109084.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 109084
-summary: Add AVX-512 optimised vector distance functions for int7 on x64
-area: Search
-type: enhancement
-issues: []
diff --git a/docs/changelog/109104.yaml b/docs/changelog/109104.yaml
deleted file mode 100644
index 985cf14bc5952..0000000000000
--- a/docs/changelog/109104.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 109104
-summary: Offload request to generic threadpool
-area: Machine Learning
-type: bug
-issues:
- - 109100
diff --git a/docs/changelog/109123.yaml b/docs/changelog/109123.yaml
deleted file mode 100644
index dfd7e52b33e7f..0000000000000
--- a/docs/changelog/109123.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 109123
-summary: "[Inference API] Check for related pipelines on delete inference endpoint"
-area: Machine Learning
-type: enhancement
-issues: []
diff --git a/docs/changelog/109126.yaml b/docs/changelog/109126.yaml
deleted file mode 100644
index 248eacc76b65c..0000000000000
--- a/docs/changelog/109126.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 109126
-summary: Correctly handle duplicate model ids for the `_cat` trained models api and usage statistics
-area: Machine Learning
-type: bug
-issues: [ ]
diff --git a/docs/changelog/109167.yaml b/docs/changelog/109167.yaml
deleted file mode 100644
index e366b2302263c..0000000000000
--- a/docs/changelog/109167.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 109167
-summary: Fixes cluster state-based role mappings not recovered from disk
-area: Authorization
-type: bug
-issues: []
diff --git a/docs/changelog/109174.yaml b/docs/changelog/109174.yaml
deleted file mode 100644
index 5cd57ebd34ac6..0000000000000
--- a/docs/changelog/109174.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 109174
-summary: "ESQL: Change \"substring\" function to not return null on empty string"
-area: ES|QL
-type: bug
-issues: []
diff --git a/docs/changelog/109185.yaml b/docs/changelog/109185.yaml
deleted file mode 100644
index 4da72c4b20ffb..0000000000000
--- a/docs/changelog/109185.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 109185
-summary: Handle non-matching remote cluster wildcards properly for `IndicesRequest.SingleIndexNoWildcards`
- requests
-area: Authorization
-type: bug
-issues: []
diff --git a/docs/changelog/109194.yaml b/docs/changelog/109194.yaml
deleted file mode 100644
index bf50139547f62..0000000000000
--- a/docs/changelog/109194.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 109194
-summary: "[Inference API] Add Mistral Embeddings Support to Inference API"
-area: Machine Learning
-type: enhancement
-issues: [ ]
diff --git a/docs/changelog/109196.yaml b/docs/changelog/109196.yaml
deleted file mode 100644
index 7f5ca3efbc8d4..0000000000000
--- a/docs/changelog/109196.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 109196
-summary: Handle nullable `DocsStats` and `StoresStats`
-area: Distributed
-type: bug
-issues: []
diff --git a/docs/changelog/109204.yaml b/docs/changelog/109204.yaml
deleted file mode 100644
index b5b22ef1a06f9..0000000000000
--- a/docs/changelog/109204.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 109204
-summary: Detect long-running tasks on network threads
-area: Network
-type: enhancement
-issues: []
diff --git a/docs/changelog/109205.yaml b/docs/changelog/109205.yaml
deleted file mode 100644
index 10f13a6549fbc..0000000000000
--- a/docs/changelog/109205.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 109205
-summary: "ESQL: Fix `IpPrefix` function not handling correctly `ByteRefs`"
-area: ES|QL
-type: bug
-issues:
- - 109198
diff --git a/docs/changelog/109219.yaml b/docs/changelog/109219.yaml
deleted file mode 100644
index abf4f49235166..0000000000000
--- a/docs/changelog/109219.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
-pr: 109219
-summary: Update Lucene version to 9.11
-area: Search
-type: feature
-issues: []
-highlight:
- title: "Update Elasticsearch to Lucene 9.11"
- body: |-
-    Elasticsearch has been updated to the latest Lucene version, 9.11.
-    Here are the full release notes:
-    Some particular highlights:
- - Usage of MADVISE for better memory management: https://github.com/apache/lucene/pull/13196
- - Use RWLock to access LRUQueryCache to reduce contention: https://github.com/apache/lucene/pull/13306
- - Speedup multi-segment HNSW graph search for nested kNN queries: https://github.com/apache/lucene/pull/13121
- - Add a MemorySegment Vector scorer - for scoring without copying on-heap vectors: https://github.com/apache/lucene/pull/13339
diff --git a/docs/changelog/109220.yaml b/docs/changelog/109220.yaml
deleted file mode 100644
index b8efa8f784d7a..0000000000000
--- a/docs/changelog/109220.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 109220
-summary: "ESQL: add REPEAT string function"
-area: ES|QL
-type: enhancement
-issues: []
diff --git a/docs/changelog/109233.yaml b/docs/changelog/109233.yaml
deleted file mode 100644
index 36010273c80db..0000000000000
--- a/docs/changelog/109233.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 109233
-summary: Fix trappy timeouts in security settings APIs
-area: Security
-type: bug
-issues: []
diff --git a/docs/changelog/109236.yaml b/docs/changelog/109236.yaml
deleted file mode 100644
index e2eb917ea0343..0000000000000
--- a/docs/changelog/109236.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 109236
-summary: Use proper executor for failing requests when connection closes
-area: Network
-type: bug
-issues:
- - 109225
diff --git a/docs/changelog/109240.yaml b/docs/changelog/109240.yaml
deleted file mode 100644
index a9fad3abdc47f..0000000000000
--- a/docs/changelog/109240.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 109240
-summary: Fix trappy timeout in allocation explain API
-area: Allocation
-type: bug
-issues: []
diff --git a/docs/changelog/109241.yaml b/docs/changelog/109241.yaml
deleted file mode 100644
index b7343b9df1841..0000000000000
--- a/docs/changelog/109241.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 109241
-summary: Fix misc trappy allocation API timeouts
-area: Allocation
-type: bug
-issues: []
diff --git a/docs/changelog/109256.yaml b/docs/changelog/109256.yaml
deleted file mode 100644
index 30c15ed77f9b9..0000000000000
--- a/docs/changelog/109256.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
-pr: 109256
-summary: "[ESQL] Migrate `SimplifyComparisonArithmetics` optimization"
-area: ES|QL
-type: bug
-issues:
- - 108388
- - 108743
diff --git a/docs/changelog/109312.yaml b/docs/changelog/109312.yaml
deleted file mode 100644
index 594d3f90e8fd1..0000000000000
--- a/docs/changelog/109312.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 109312
-summary: Enable fallback synthetic source for `point` and `shape`
-area: Mapping
-type: feature
-issues: []
diff --git a/docs/changelog/109317.yaml b/docs/changelog/109317.yaml
deleted file mode 100644
index 1d8595d99c2a6..0000000000000
--- a/docs/changelog/109317.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
-pr: 109317
-summary: Add new int4 quantization to dense_vector
-area: Search
-type: feature
-issues: []
-highlight:
- title: Add new int4 quantization to dense_vector
- body: |-
-    New int4 (half-byte) scalar quantization support via two new index types: `int4_hnsw` and `int4_flat`.
-    This gives an 8x reduction from `float32` with some accuracy loss. In addition to requiring less memory, this
- improves query and merge speed significantly when compared to raw vectors.
- notable: true
-
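For reference, a minimal sketch of a mapping that opts into the new quantization (index and field names are illustrative, not taken from this changelog; `int4` requires an even number of dimensions):

[source,console]
----
PUT my-int4-index
{
  "mappings": {
    "properties": {
      "my_vector": {
        "type": "dense_vector",
        "dims": 4,
        "index": true,
        "index_options": { "type": "int4_hnsw" }
      }
    }
  }
}
----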
diff --git a/docs/changelog/109332.yaml b/docs/changelog/109332.yaml
deleted file mode 100644
index 3d03523fd518b..0000000000000
--- a/docs/changelog/109332.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 109332
-summary: "ES|QL: vectorize eval"
-area: ES|QL
-type: enhancement
-issues: []
diff --git a/docs/changelog/109358.yaml b/docs/changelog/109358.yaml
deleted file mode 100644
index af47b4129d874..0000000000000
--- a/docs/changelog/109358.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 109358
-summary: Use the multi node routing action for internal inference services
-area: Machine Learning
-type: bug
-issues: []
diff --git a/docs/changelog/109359.yaml b/docs/changelog/109359.yaml
deleted file mode 100644
index 37202eb5a28ec..0000000000000
--- a/docs/changelog/109359.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 109359
-summary: Adding hamming distance function to painless for `dense_vector` fields
-area: Vector Search
-type: enhancement
-issues: []
diff --git a/docs/changelog/109370.yaml b/docs/changelog/109370.yaml
deleted file mode 100644
index 32b190d1a1c94..0000000000000
--- a/docs/changelog/109370.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 109370
-summary: Enable fallback synthetic source by default
-area: Mapping
-type: feature
-issues:
- - 106460
diff --git a/docs/changelog/109384.yaml b/docs/changelog/109384.yaml
deleted file mode 100644
index 303da23d57d8e..0000000000000
--- a/docs/changelog/109384.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 109384
-summary: Fix serialising inference delete response
-area: Machine Learning
-type: bug
-issues: []
diff --git a/docs/changelog/109386.yaml b/docs/changelog/109386.yaml
deleted file mode 100644
index 984ee96dde063..0000000000000
--- a/docs/changelog/109386.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 109386
-summary: "ESQL: `top_list` aggregation"
-area: ES|QL
-type: feature
-issues:
- - 109213
diff --git a/docs/changelog/109395.yaml b/docs/changelog/109395.yaml
deleted file mode 100644
index e5336695afa48..0000000000000
--- a/docs/changelog/109395.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 109395
-summary: Correct positioning for unique token filter
-area: Analysis
-type: bug
-issues: []
diff --git a/docs/changelog/109410.yaml b/docs/changelog/109410.yaml
deleted file mode 100644
index e8c4dcdab42c6..0000000000000
--- a/docs/changelog/109410.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 109410
-summary: Support synthetic source for date fields when `ignore_malformed` is used
-area: Mapping
-type: enhancement
-issues: []
diff --git a/docs/changelog/109414.yaml b/docs/changelog/109414.yaml
new file mode 100644
index 0000000000000..81b7541bde35b
--- /dev/null
+++ b/docs/changelog/109414.yaml
@@ -0,0 +1,6 @@
+pr: 109414
+summary: Don't fail retention lease sync actions due to capacity constraints
+area: CRUD
+type: bug
+issues:
+ - 105926
diff --git a/docs/changelog/109444.yaml b/docs/changelog/109444.yaml
deleted file mode 100644
index 8c56fe2dd9f02..0000000000000
--- a/docs/changelog/109444.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 109444
-summary: "Aggs: Scripted metric allow list"
-area: Aggregations
-type: enhancement
-issues: []
diff --git a/docs/changelog/109449.yaml b/docs/changelog/109449.yaml
deleted file mode 100644
index 90cb908227f1b..0000000000000
--- a/docs/changelog/109449.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 109449
-summary: Reset max page size to settings value
-area: Transform
-type: bug
-issues:
- - 109308
diff --git a/docs/changelog/109462.yaml b/docs/changelog/109462.yaml
deleted file mode 100644
index a05f4a04e80ae..0000000000000
--- a/docs/changelog/109462.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 109462
-summary: Add `wait_for_completion` parameter to delete snapshot request
-area: Distributed
-type: enhancement
-issues:
- - 101300
diff --git a/docs/changelog/109470.yaml b/docs/changelog/109470.yaml
deleted file mode 100644
index 837c1664b775a..0000000000000
--- a/docs/changelog/109470.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 109470
-summary: Enabling profiling for `RankBuilders` and adding tests for RRF
-area: Ranking
-type: enhancement
-issues: []
diff --git a/docs/changelog/109480.yaml b/docs/changelog/109480.yaml
deleted file mode 100644
index 3a6f48e9bd840..0000000000000
--- a/docs/changelog/109480.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 109480
-summary: "[Connector API] Add claim sync job endpoint"
-area: Application
-type: feature
-issues: []
diff --git a/docs/changelog/109481.yaml b/docs/changelog/109481.yaml
deleted file mode 100644
index e8251788a90bd..0000000000000
--- a/docs/changelog/109481.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 109481
-summary: Fork freeing search/scroll contexts to GENERIC pool
-area: Search
-type: bug
-issues: []
diff --git a/docs/changelog/109487.yaml b/docs/changelog/109487.yaml
deleted file mode 100644
index c69c77203f12d..0000000000000
--- a/docs/changelog/109487.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 109487
-summary: Start Trained Model Deployment API request query params now override body params
-area: Machine Learning
-type: bug
-issues: []
diff --git a/docs/changelog/109501.yaml b/docs/changelog/109501.yaml
deleted file mode 100644
index 6e81f98816cbf..0000000000000
--- a/docs/changelog/109501.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-pr: 109501
-summary: Reflect latest changes in synthetic source documentation
-area: Mapping
-type: enhancement
-issues: []
-highlight:
- title: Synthetic `_source` improvements
- body: |-
- There are multiple improvements to synthetic `_source` functionality:
-
- * Synthetic `_source` is now supported for all field types including `nested` and `object`. `object` fields are supported with `enabled` set to `false`.
-
- * Synthetic `_source` can be enabled together with `ignore_malformed` and `ignore_above` parameters for all field types that support them.
- notable: false
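As a sketch of the combination described above (index and field names are illustrative), synthetic `_source` can be enabled alongside `ignore_malformed`:

[source,console]
----
PUT my-synthetic-index
{
  "mappings": {
    "_source": { "mode": "synthetic" },
    "properties": {
      "ip_address": { "type": "ip", "ignore_malformed": true }
    }
  }
}
----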
diff --git a/docs/changelog/109506.yaml b/docs/changelog/109506.yaml
deleted file mode 100644
index 3a7570ed0b93a..0000000000000
--- a/docs/changelog/109506.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 109506
-summary: Support synthetic source for `scaled_float` and `unsigned_long` when `ignore_malformed`
- is used
-area: Mapping
-type: enhancement
-issues: []
diff --git a/docs/changelog/109534.yaml b/docs/changelog/109534.yaml
deleted file mode 100644
index c6eb520bb70a8..0000000000000
--- a/docs/changelog/109534.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 109534
-summary: Propagate accurate deployment timeout
-area: Machine Learning
-type: bug
-issues:
- - 109407
diff --git a/docs/changelog/109540.yaml b/docs/changelog/109540.yaml
deleted file mode 100644
index 722c60a30fb97..0000000000000
--- a/docs/changelog/109540.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 109540
-summary: Add metrics@custom component template to metrics-*-* index template
-area: Data streams
-type: enhancement
-issues:
- - 109475
diff --git a/docs/changelog/109551.yaml b/docs/changelog/109551.yaml
deleted file mode 100644
index f4949669091d9..0000000000000
--- a/docs/changelog/109551.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 109551
-summary: Avoid `InferenceRunner` deadlock
-area: Machine Learning
-type: bug
-issues: []
diff --git a/docs/changelog/109554.yaml b/docs/changelog/109554.yaml
deleted file mode 100644
index 4e78a8f3044c7..0000000000000
--- a/docs/changelog/109554.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 109554
-summary: "[Query Rules] Add API calls to get or delete individual query rules within\
- \ a ruleset"
-area: Relevance
-type: enhancement
-issues: []
diff --git a/docs/changelog/109563.yaml b/docs/changelog/109563.yaml
deleted file mode 100644
index 9099064b6b040..0000000000000
--- a/docs/changelog/109563.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 109563
-summary: Add allocation explain output for THROTTLING shards
-area: Infra/Core
-type: enhancement
-issues: []
diff --git a/docs/changelog/109583.yaml b/docs/changelog/109583.yaml
new file mode 100644
index 0000000000000..84757e307b4fb
--- /dev/null
+++ b/docs/changelog/109583.yaml
@@ -0,0 +1,29 @@
+pr: 109583
+summary: "ESQL: INLINESTATS"
+area: ES|QL
+type: feature
+issues:
+ - 107589
+highlight:
+ title: "ESQL: INLINESTATS"
+ body: |-
+ This adds the `INLINESTATS` command to ESQL which performs a STATS and
+ then enriches the results into the output stream. So, this query:
+
+ [source,esql]
+ ----
+ FROM test
+ | INLINESTATS m=MAX(a * b) BY b
+ | WHERE m == a * b
+ | SORT a DESC, b DESC
+ | LIMIT 3
+ ----
+
+ Produces output like:
+
+ | a | b | m |
+ | --- | --- | ----- |
+ | 99 | 999 | 98901 |
+ | 99 | 998 | 98802 |
+ | 99 | 997 | 98703 |
+ notable: true
diff --git a/docs/changelog/109597.yaml b/docs/changelog/109597.yaml
deleted file mode 100644
index 9b99df85da6a3..0000000000000
--- a/docs/changelog/109597.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 109597
-summary: Opt `scripted_metric` out of parallelization
-area: Aggregations
-type: feature
-issues: []
diff --git a/docs/changelog/109603.yaml b/docs/changelog/109603.yaml
deleted file mode 100644
index 2d6e8b94aa8d0..0000000000000
--- a/docs/changelog/109603.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 109603
-summary: Update translog `writeLocation` for `flushListener` after commit
-area: Engine
-type: enhancement
-issues: []
diff --git a/docs/changelog/109606.yaml b/docs/changelog/109606.yaml
deleted file mode 100644
index 6c9089c4c4fde..0000000000000
--- a/docs/changelog/109606.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 109606
-summary: Avoid NPE if `users_roles` file does not exist
-area: Authentication
-type: bug
-issues: []
diff --git a/docs/changelog/109613.yaml b/docs/changelog/109613.yaml
deleted file mode 100644
index 21d152ac1d6de..0000000000000
--- a/docs/changelog/109613.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 109613
-summary: Consider `error_trace` supported by all endpoints
-area: Infra/REST API
-type: bug
-issues:
- - 109612
diff --git a/docs/changelog/109618.yaml b/docs/changelog/109618.yaml
deleted file mode 100644
index f28bb15a53d96..0000000000000
--- a/docs/changelog/109618.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 109618
-summary: Fail cluster state API if blocked
-area: Cluster Coordination
-type: bug
-issues:
- - 107503
diff --git a/docs/changelog/109634.yaml b/docs/changelog/109634.yaml
deleted file mode 100644
index 4c6358578b6de..0000000000000
--- a/docs/changelog/109634.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 109634
-summary: "[Query Rules] Require Enterprise License for Query Rules"
-area: Relevance
-type: enhancement
-issues: []
diff --git a/docs/changelog/109651.yaml b/docs/changelog/109651.yaml
deleted file mode 100644
index 982e6a5b536cc..0000000000000
--- a/docs/changelog/109651.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 109651
-summary: Support synthetic source for `geo_point` when `ignore_malformed` is used
-area: Mapping
-type: enhancement
-issues: []
diff --git a/docs/changelog/109653.yaml b/docs/changelog/109653.yaml
deleted file mode 100644
index 665163ec2a91b..0000000000000
--- a/docs/changelog/109653.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 109653
-summary: Handle the "JSON memory allocator bytes" field
-area: Machine Learning
-type: enhancement
-issues: []
diff --git a/docs/changelog/109657.yaml b/docs/changelog/109657.yaml
deleted file mode 100644
index 35b315b7568c9..0000000000000
--- a/docs/changelog/109657.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 109657
-summary: Track `RequestedRangeNotSatisfiedException` separately in S3 Metrics
-area: Snapshot/Restore
-type: enhancement
-issues: []
diff --git a/docs/changelog/109672.yaml b/docs/changelog/109672.yaml
deleted file mode 100644
index bb6532ab7accf..0000000000000
--- a/docs/changelog/109672.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 109672
-summary: Log repo UUID at generation/registration time
-area: Snapshot/Restore
-type: enhancement
-issues: []
diff --git a/docs/changelog/109717.yaml b/docs/changelog/109717.yaml
deleted file mode 100644
index 326657ea4ce21..0000000000000
--- a/docs/changelog/109717.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 109717
-summary: Bump jackson version in modules:repository-azure
-area: Snapshot/Restore
-type: upgrade
-issues: []
diff --git a/docs/changelog/109720.yaml b/docs/changelog/109720.yaml
deleted file mode 100644
index b029726c84427..0000000000000
--- a/docs/changelog/109720.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 109720
-summary: "DocsStats: Add human readable bytesize"
-area: Stats
-type: enhancement
-issues: []
diff --git a/docs/changelog/109746.yaml b/docs/changelog/109746.yaml
deleted file mode 100644
index 5360f545333ac..0000000000000
--- a/docs/changelog/109746.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 109746
-summary: ES|QL Add primitive float support to the Compute Engine
-area: ES|QL
-type: enhancement
-issues:
- - 109178
diff --git a/docs/changelog/109779.yaml b/docs/changelog/109779.yaml
deleted file mode 100644
index 4ccd8d475ec8d..0000000000000
--- a/docs/changelog/109779.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 109779
-summary: Include component templates in retention validation
-area: Data streams
-type: bug
-issues: []
diff --git a/docs/changelog/109781.yaml b/docs/changelog/109781.yaml
deleted file mode 100644
index df74645b53d84..0000000000000
--- a/docs/changelog/109781.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 109781
-summary: ES|QL Add primitive float variants of all aggregators to the compute engine
-area: ES|QL
-type: enhancement
-issues: []
diff --git a/docs/changelog/109794.yaml b/docs/changelog/109794.yaml
deleted file mode 100644
index d244c69a903ba..0000000000000
--- a/docs/changelog/109794.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 109794
-summary: Provide document size reporter with `MapperService`
-area: Infra/Metrics
-type: bug
-issues: []
diff --git a/docs/changelog/109807.yaml b/docs/changelog/109807.yaml
deleted file mode 100644
index 5cf8a2c896c4e..0000000000000
--- a/docs/changelog/109807.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 109807
-summary: "ESQL: Fix LOOKUP attribute shadowing"
-area: ES|QL
-type: bug
-issues:
- - 109392
diff --git a/docs/changelog/109813.yaml b/docs/changelog/109813.yaml
deleted file mode 100644
index edcef17e87606..0000000000000
--- a/docs/changelog/109813.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 109813
-summary: Add text similarity reranker retriever
-area: Ranking
-type: feature
-issues: []
diff --git a/docs/changelog/109848.yaml b/docs/changelog/109848.yaml
deleted file mode 100644
index 858bbe84ef3a4..0000000000000
--- a/docs/changelog/109848.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 109848
-summary: Denser in-memory representation of `ShardBlobsToDelete`
-area: Snapshot/Restore
-type: enhancement
-issues: []
diff --git a/docs/changelog/109873.yaml b/docs/changelog/109873.yaml
deleted file mode 100644
index c77197cc22d0a..0000000000000
--- a/docs/changelog/109873.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 109873
-summary: "ESQL: add Arrow dataframes output format"
-area: ES|QL
-type: feature
-issues: []
diff --git a/docs/changelog/109876.yaml b/docs/changelog/109876.yaml
deleted file mode 100644
index 4a65b4e17c4a3..0000000000000
--- a/docs/changelog/109876.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 109876
-summary: Always pick the user `maxPageSize` value
-area: Transform
-type: bug
-issues:
- - 109844
diff --git a/docs/changelog/109880.yaml b/docs/changelog/109880.yaml
deleted file mode 100644
index 71c7209824a8a..0000000000000
--- a/docs/changelog/109880.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
-pr: 109880
-summary: Deprecate `text_expansion` and `weighted_tokens` queries
-area: Machine Learning
-type: deprecation
-issues: [ ]
-deprecation:
- title: Deprecate `text_expansion` and `weighted_tokens` queries
- area: REST API
- details: The `text_expansion` and `weighted_tokens` queries have been replaced by `sparse_vector`.
-  impact: Please update your existing `text_expansion` and `weighted_tokens` queries to use `sparse_vector`.
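A sketch of the replacement the impact note asks for, assuming an illustrative sparse field `ml.tokens` and an inference endpoint named `my-elser-endpoint`:

[source,console]
----
GET my-index/_search
{
  "query": {
    "sparse_vector": {
      "field": "ml.tokens",
      "inference_id": "my-elser-endpoint",
      "query": "How is the weather in Jamaica?"
    }
  }
}
----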
diff --git a/docs/changelog/109882.yaml b/docs/changelog/109882.yaml
deleted file mode 100644
index 0f0fed01c5a7a..0000000000000
--- a/docs/changelog/109882.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 109882
-summary: Support synthetic source together with `ignore_malformed` in histogram fields
-area: Mapping
-type: enhancement
-issues: []
diff --git a/docs/changelog/109893.yaml b/docs/changelog/109893.yaml
deleted file mode 100644
index df6d6e51236c8..0000000000000
--- a/docs/changelog/109893.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 109893
-summary: Add Anthropic messages integration to Inference API
-area: Machine Learning
-type: enhancement
-issues: [ ]
diff --git a/docs/changelog/109908.yaml b/docs/changelog/109908.yaml
deleted file mode 100644
index cdf2acf17096c..0000000000000
--- a/docs/changelog/109908.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 109908
-summary: "Update checkpoints after post-replication actions, even on failure"
-area: CRUD
-type: bug
-issues: []
diff --git a/docs/changelog/109931.yaml b/docs/changelog/109931.yaml
deleted file mode 100644
index 3575cfd49176f..0000000000000
--- a/docs/changelog/109931.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 109931
-summary: Apply FLS to the contents of `IgnoredSourceFieldMapper`
-area: Mapping
-type: enhancement
-issues: []
diff --git a/docs/changelog/109957.yaml b/docs/changelog/109957.yaml
deleted file mode 100644
index 6bbcd8175501c..0000000000000
--- a/docs/changelog/109957.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 109957
-summary: Add request metric to `RestController` to track success/failure (by status
- code)
-area: Infra/Metrics
-type: enhancement
-issues: []
diff --git a/docs/changelog/109963.yaml b/docs/changelog/109963.yaml
deleted file mode 100644
index 1745d549582d4..0000000000000
--- a/docs/changelog/109963.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 109963
-summary: Propagate mapper builder context flags across nested mapper builder context
- creation
-area: Mapping
-type: bug
-issues: []
diff --git a/docs/changelog/109967.yaml b/docs/changelog/109967.yaml
deleted file mode 100644
index cfc6b6462954b..0000000000000
--- a/docs/changelog/109967.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 109967
-summary: Default the HF service to cosine similarity
-area: Machine Learning
-type: enhancement
-issues: []
diff --git a/docs/changelog/109981.yaml b/docs/changelog/109981.yaml
deleted file mode 100644
index cf9388f79e29c..0000000000000
--- a/docs/changelog/109981.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 109981
-summary: Limit number of synonym rules that can be created
-area: Mapping
-type: bug
-issues: [108785]
diff --git a/docs/changelog/109989.yaml b/docs/changelog/109989.yaml
deleted file mode 100644
index f1f5972b60eb3..0000000000000
--- a/docs/changelog/109989.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 109989
-summary: "ESQL: Fix Join references"
-area: ES|QL
-type: bug
-issues: []
diff --git a/docs/changelog/109993.yaml b/docs/changelog/109993.yaml
deleted file mode 100644
index 40d161b6b5c24..0000000000000
--- a/docs/changelog/109993.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 109993
-summary: "[ES|QL] `weighted_avg`"
-area: ES|QL
-type: enhancement
-issues: []
diff --git a/docs/changelog/110004.yaml b/docs/changelog/110004.yaml
deleted file mode 100644
index f680016527a9c..0000000000000
--- a/docs/changelog/110004.yaml
+++ /dev/null
@@ -1,11 +0,0 @@
-pr: 110004
-summary: Mark Query Rules as GA
-area: Relevance
-type: feature
-issues: []
-highlight:
- title: Mark Query Rules as GA
- body: |-
-    This PR marks query rules as Generally Available. The query rules APIs
-    are no longer in tech preview.
- notable: true
diff --git a/docs/changelog/110016.yaml b/docs/changelog/110016.yaml
deleted file mode 100644
index 28ad55aa796c8..0000000000000
--- a/docs/changelog/110016.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 110016
-summary: Opt in keyword field into fallback synthetic source if needed
-area: Mapping
-type: enhancement
-issues: []
diff --git a/docs/changelog/110019.yaml b/docs/changelog/110019.yaml
deleted file mode 100644
index 632e79008d351..0000000000000
--- a/docs/changelog/110019.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 110019
-summary: Improve mechanism for extracting the result of a `PlainActionFuture`
-area: Distributed
-type: enhancement
-issues:
- - 108125
diff --git a/docs/changelog/110046.yaml b/docs/changelog/110046.yaml
deleted file mode 100644
index 6ebe440e7aced..0000000000000
--- a/docs/changelog/110046.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 110046
-summary: "ESQL: make named params objects truly per request"
-area: ES|QL
-type: bug
-issues:
- - 110028
diff --git a/docs/changelog/110059.yaml b/docs/changelog/110059.yaml
deleted file mode 100644
index ba160c091cdc2..0000000000000
--- a/docs/changelog/110059.yaml
+++ /dev/null
@@ -1,32 +0,0 @@
-pr: 110059
-summary: Adds new `bit` `element_type` for `dense_vectors`
-area: Vector Search
-type: feature
-issues: []
-highlight:
- title: Adds new `bit` `element_type` for `dense_vectors`
- body: |-
- This adds `bit` vector support by adding `element_type: bit` for
- vectors. This new element type works for indexed and non-indexed
- vectors. Additionally, it works with `hnsw` and `flat` index types. No
-    quantization-based codec works with this element type; this is
- consistent with `byte` vectors.
-
- `bit` vectors accept up to `32768` dimensions in size and expect vectors
-    that are being indexed to be encoded either as a hexadecimal string or a
- `byte[]` array where each element of the `byte` array represents `8`
- bits of the vector.
-
- `bit` vectors support script usage and regular query usage. When
- indexed, all comparisons done are `xor` and `popcount` summations (aka,
- hamming distance), and the scores are transformed and normalized given
- the vector dimensions.
-
- For scripts, `l1norm` is the same as `hamming` distance and `l2norm` is
- `sqrt(l1norm)`. `dotProduct` and `cosineSimilarity` are not supported.
-
-    Note that the dimensions expected by this element_type must always be
-    divisible by `8`, and the `byte[]` vectors provided at index time must
-    have size `dim/8`, where each byte element represents `8` bits of
-    the vector.
- notable: true
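To make the encoding concrete, a minimal sketch with illustrative names: a `bit` vector with `dims: 8` stores a single byte, so a document can supply it as a two-character hexadecimal string.

[source,console]
----
PUT my-bit-vectors
{
  "mappings": {
    "properties": {
      "my_vector": { "type": "dense_vector", "dims": 8, "element_type": "bit" }
    }
  }
}

PUT my-bit-vectors/_doc/1
{
  "my_vector": "7f"
}
----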
diff --git a/docs/changelog/110061.yaml b/docs/changelog/110061.yaml
deleted file mode 100644
index 1880a2a197722..0000000000000
--- a/docs/changelog/110061.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 110061
-summary: Avoid running watch jobs in TickerScheduleTriggerEngine if it is paused
-area: Watcher
-type: bug
-issues:
- - 105933
diff --git a/docs/changelog/110066.yaml b/docs/changelog/110066.yaml
deleted file mode 100644
index 920c6304b63ae..0000000000000
--- a/docs/changelog/110066.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 110066
-summary: Support flattened fields and multi-fields as dimensions in downsampling
-area: Downsampling
-type: bug
-issues:
- - 99297
diff --git a/docs/changelog/110096.yaml b/docs/changelog/110096.yaml
deleted file mode 100644
index 3d6616c289266..0000000000000
--- a/docs/changelog/110096.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 110096
-summary: Fix `ClassCastException` with MV_EXPAND on missing field
-area: ES|QL
-type: bug
-issues:
- - 109974
diff --git a/docs/changelog/110102.yaml b/docs/changelog/110102.yaml
deleted file mode 100644
index d1b9b53e2dfc5..0000000000000
--- a/docs/changelog/110102.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 110102
-summary: Optimize ST_DISTANCE filtering with Lucene circle intersection query
-area: ES|QL
-type: enhancement
-issues:
- - 109972
diff --git a/docs/changelog/110112.yaml b/docs/changelog/110112.yaml
deleted file mode 100644
index eca5fd9af15ce..0000000000000
--- a/docs/changelog/110112.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 110112
-summary: Increase response size limit for batched requests
-area: Machine Learning
-type: bug
-issues: []
diff --git a/docs/changelog/110146.yaml b/docs/changelog/110146.yaml
deleted file mode 100644
index 61ba35cec319b..0000000000000
--- a/docs/changelog/110146.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 110146
-summary: Fix trailing slash in `ml.get_categories` specification
-area: Machine Learning
-type: bug
-issues: []
diff --git a/docs/changelog/110160.yaml b/docs/changelog/110160.yaml
deleted file mode 100644
index 0c38c23c69067..0000000000000
--- a/docs/changelog/110160.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 110160
-summary: Opt in number fields into fallback synthetic source when doc values a…
-area: Mapping
-type: enhancement
-issues: []
diff --git a/docs/changelog/110176.yaml b/docs/changelog/110176.yaml
deleted file mode 100644
index ae1d7d10d6dc4..0000000000000
--- a/docs/changelog/110176.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 110176
-summary: Fix trailing slash in two rollup specifications
-area: Rollup
-type: bug
-issues: []
diff --git a/docs/changelog/110177.yaml b/docs/changelog/110177.yaml
deleted file mode 100644
index 0ac5328d88df4..0000000000000
--- a/docs/changelog/110177.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 110177
-summary: Fix trailing slash in `security.put_privileges` specification
-area: Authorization
-type: bug
-issues: []
diff --git a/docs/changelog/110179.yaml b/docs/changelog/110179.yaml
deleted file mode 100644
index b99a390c8586f..0000000000000
--- a/docs/changelog/110179.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 110179
-summary: Make repository analysis API available to non-operators
-area: Snapshot/Restore
-type: enhancement
-issues:
- - 100318
diff --git a/docs/changelog/110186.yaml b/docs/changelog/110186.yaml
deleted file mode 100644
index 23eaab118e2ab..0000000000000
--- a/docs/changelog/110186.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 110186
-summary: Don't sample calls to `ReduceContext#consumeBucketsAndMaybeBreak` in `InternalDateHistogram`
- and `InternalHistogram` during reduction
-area: Aggregations
-type: bug
-issues: []
diff --git a/docs/changelog/110201.yaml b/docs/changelog/110201.yaml
deleted file mode 100644
index a880638881948..0000000000000
--- a/docs/changelog/110201.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 110201
-summary: "ES|QL: Fix DISSECT that overwrites input"
-area: ES|QL
-type: bug
-issues:
- - 110184
diff --git a/docs/changelog/110214.yaml b/docs/changelog/110214.yaml
deleted file mode 100644
index 20f61cac64454..0000000000000
--- a/docs/changelog/110214.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 110214
-summary: Handle `ignore_above` in synthetic source for flattened fields
-area: Mapping
-type: enhancement
-issues: []
diff --git a/docs/changelog/110216.yaml b/docs/changelog/110216.yaml
new file mode 100644
index 0000000000000..00ab20b230e2c
--- /dev/null
+++ b/docs/changelog/110216.yaml
@@ -0,0 +1,5 @@
+pr: 110216
+summary: Register SLM run before snapshotting to save stats
+area: ILM+SLM
+type: enhancement
+issues: []
diff --git a/docs/changelog/110233.yaml b/docs/changelog/110233.yaml
deleted file mode 100644
index d9ce4057090a4..0000000000000
--- a/docs/changelog/110233.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 110233
-summary: Support k parameter for knn query
-area: Vector Search
-type: enhancement
-issues:
- - 108473
diff --git a/docs/changelog/110234.yaml b/docs/changelog/110234.yaml
deleted file mode 100644
index 0656ba5fb6636..0000000000000
--- a/docs/changelog/110234.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 110234
-summary: Upgrade to Lucene-9.11.1
-area: Search
-type: upgrade
-issues: []
diff --git a/docs/changelog/110236.yaml b/docs/changelog/110236.yaml
deleted file mode 100644
index e2dbff7fbf768..0000000000000
--- a/docs/changelog/110236.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
-pr: 110236
-summary: '`ParseHeapRatioOrDeprecatedByteSizeValue` for `indices.breaker.total.limit`'
-area: Infra/Settings
-type: deprecation
-issues: []
-deprecation:
- title: 'Deprecate absolute size values for `indices.breaker.total.limit` setting'
- area: Cluster and node setting
- details: Previously, the value of `indices.breaker.total.limit` could be specified as
-    an absolute size in bytes. This setting controls the overall amount of
- memory the server is allowed to use before taking remedial actions. Setting
- this to a specific number of bytes led to strange behaviour when the node
-    maximum heap size changed because the circuit breaker limit would remain
- unchanged. This would either leave the value too low, causing part of the
- heap to remain unused; or it would leave the value too high, causing the
- circuit breaker to be ineffective at preventing OOM errors. The only
- reasonable behaviour for this setting is that it scales with the size of
- the heap, and so absolute byte limits are now deprecated.
- impact: Users must change their configuration to specify a percentage instead of
- an absolute number of bytes for `indices.breaker.total.limit`, or else
- accept the default, which is already specified as a percentage.
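A sketch of the percentage form the impact note calls for, set dynamically via the cluster settings API (the value shown is illustrative):

[source,console]
----
PUT _cluster/settings
{
  "persistent": {
    "indices.breaker.total.limit": "70%"
  }
}
----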
diff --git a/docs/changelog/110248.yaml b/docs/changelog/110248.yaml
deleted file mode 100644
index 85739528b69c6..0000000000000
--- a/docs/changelog/110248.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 110248
-summary: "[Inference API] Add Amazon Bedrock Support to Inference API"
-area: Machine Learning
-type: enhancement
-issues: [ ]
diff --git a/docs/changelog/110251.yaml b/docs/changelog/110251.yaml
deleted file mode 100644
index a3b0c3128be35..0000000000000
--- a/docs/changelog/110251.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
-pr: 110251
-summary: Support index sorting with nested fields
-area: Logs
-type: enhancement
-issues:
- - 107349
-highlight:
- title: Index sorting on indexes with nested fields
- body: |-
- Index sorting is now supported for indexes with mappings containing nested objects.
-    However, the index sort spec (as specified by `index.sort.field`) still can't
-    contain any nested fields.
- notable: false
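A minimal sketch of what this change permits, with illustrative names: the sort field is a top-level `date` field while the mapping also contains a `nested` object, which itself still can't be used for sorting.

[source,console]
----
PUT my-sorted-index
{
  "settings": {
    "index.sort.field": "@timestamp",
    "index.sort.order": "desc"
  },
  "mappings": {
    "properties": {
      "@timestamp": { "type": "date" },
      "events": {
        "type": "nested",
        "properties": { "code": { "type": "keyword" } }
      }
    }
  }
}
----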
diff --git a/docs/changelog/110334.yaml b/docs/changelog/110334.yaml
deleted file mode 100644
index f83ac04ded773..0000000000000
--- a/docs/changelog/110334.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 110334
-summary: Sentence Chunker
-area: Machine Learning
-type: enhancement
-issues: []
diff --git a/docs/changelog/110337.yaml b/docs/changelog/110337.yaml
deleted file mode 100644
index bf21a95c9157f..0000000000000
--- a/docs/changelog/110337.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 110337
-summary: Support `ignore_above` on keyword dimensions
-area: TSDB
-type: enhancement
-issues: []
diff --git a/docs/changelog/110338.yaml b/docs/changelog/110338.yaml
deleted file mode 100644
index 2334a1cbc9283..0000000000000
--- a/docs/changelog/110338.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 110338
-summary: Add `semantic_text` field type and `semantic` query
-area: Mapping
-type: feature
-issues: []
diff --git a/docs/changelog/110347.yaml b/docs/changelog/110347.yaml
deleted file mode 100644
index 8727128230935..0000000000000
--- a/docs/changelog/110347.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 110347
-summary: "ESQL: Renamed `TopList` to Top"
-area: ES|QL
-type: enhancement
-issues: []
diff --git a/docs/changelog/110361.yaml b/docs/changelog/110361.yaml
deleted file mode 100644
index 8558c88e06049..0000000000000
--- a/docs/changelog/110361.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
-pr: 110361
-summary: Don't detect `PlainActionFuture` deadlock on concurrent complete
-area: Distributed
-type: bug
-issues:
- - 110181
- - 110360
diff --git a/docs/changelog/110369.yaml b/docs/changelog/110369.yaml
deleted file mode 100644
index 770294605b444..0000000000000
--- a/docs/changelog/110369.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 110369
-summary: Run terms concurrently when cardinality is only lower than shard size
-area: Aggregations
-type: bug
-issues:
- - 105505
diff --git a/docs/changelog/110383.yaml b/docs/changelog/110383.yaml
deleted file mode 100644
index 5e9bddd4bfcd2..0000000000000
--- a/docs/changelog/110383.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 110383
-summary: Add bulk delete roles API
-area: Security
-type: enhancement
-issues: []
diff --git a/docs/changelog/110391.yaml b/docs/changelog/110391.yaml
deleted file mode 100644
index 1e00eda970398..0000000000000
--- a/docs/changelog/110391.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 110391
-summary: Fix ST_DISTANCE Lucene push-down for complex predicates
-area: ES|QL
-type: bug
-issues:
- - 110349
diff --git a/docs/changelog/110395.yaml b/docs/changelog/110395.yaml
deleted file mode 100644
index 690be55abb5b2..0000000000000
--- a/docs/changelog/110395.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
-pr: 110395
-summary: Mark the Redact processor as Generally Available
-area: Ingest Node
-type: feature
-issues: []
-highlight:
- title: The Redact processor is Generally Available
- body: The Redact processor uses the Grok rules engine to obscure text in the input document matching the given Grok patterns. The Redact processor was initially released as Technical Preview in `8.7.0`, and is now released as Generally Available.
- notable: true
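A small sketch of the processor in action via the simulate API, with an illustrative document; the `%{IP:client}` Grok pattern determines what gets masked:

[source,console]
----
POST _ingest/pipeline/_simulate
{
  "pipeline": {
    "processors": [
      { "redact": { "field": "message", "patterns": [ "%{IP:client}" ] } }
    ]
  },
  "docs": [
    { "_source": { "message": "55.3.244.1 GET /index.html" } }
  ]
}
----

The simulated document comes back with the match replaced, reading `<client> GET /index.html`.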
diff --git a/docs/changelog/110431.yaml b/docs/changelog/110431.yaml
deleted file mode 100644
index 0dd93ef718ef9..0000000000000
--- a/docs/changelog/110431.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 110431
-summary: "[Inference API] Fix serialization for inference delete endpoint response"
-area: Machine Learning
-type: bug
-issues: []
diff --git a/docs/changelog/110476.yaml b/docs/changelog/110476.yaml
deleted file mode 100644
index bc12b3711a366..0000000000000
--- a/docs/changelog/110476.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
-pr: 110476
-summary: Fix bug in union-types with type-casting in grouping key of STATS
-area: ES|QL
-type: bug
-issues:
- - 109922
- - 110477
diff --git a/docs/changelog/110488.yaml b/docs/changelog/110488.yaml
deleted file mode 100644
index fbb439f20fc96..0000000000000
--- a/docs/changelog/110488.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 110488
-summary: "ESQL: Validate unique plan attribute names"
-area: ES|QL
-type: bug
-issues:
- - 110541
diff --git a/docs/changelog/110524.yaml b/docs/changelog/110524.yaml
new file mode 100644
index 0000000000000..6274c99b09998
--- /dev/null
+++ b/docs/changelog/110524.yaml
@@ -0,0 +1,5 @@
+pr: 110524
+summary: Introduce mode `subobjects=auto` for objects
+area: Mapping
+type: enhancement
+issues: []
diff --git a/docs/changelog/110540.yaml b/docs/changelog/110540.yaml
deleted file mode 100644
index 5e4994da80704..0000000000000
--- a/docs/changelog/110540.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
-pr: 110540
-summary: Deprecate using slm privileges to access ilm
-area: ILM+SLM
-type: deprecation
-issues: []
-deprecation:
- title: Deprecate using slm privileges to access ilm
- area: REST API
- details: The `read_slm` privilege can get the ILM status, and
- the `manage_slm` privilege can start and stop ILM. Access to these
- APIs should be granted using the `read_ilm` and `manage_ilm` privileges
- instead. Access to ILM APIs will be removed from SLM privileges in
- a future major release, and is now deprecated.
- impact: Users that need access to the ILM status API should now
- use the `read_ilm` privilege. Users that need to start and stop ILM,
- should use the `manage_ilm` privilege.
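A sketch of the migration the impact note describes, using an illustrative role name; grant the ILM privilege directly rather than relying on the SLM privileges:

[source,console]
----
PUT _security/role/ilm_status_reader
{
  "cluster": [ "read_ilm" ]
}
----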
diff --git a/docs/changelog/110586.yaml b/docs/changelog/110586.yaml
deleted file mode 100644
index cc2bcb85a2dac..0000000000000
--- a/docs/changelog/110586.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 110586
-summary: "ESQL: Fix Max doubles bug with negatives and add tests for Max and Min"
-area: ES|QL
-type: bug
-issues: []
diff --git a/docs/changelog/110630.yaml b/docs/changelog/110630.yaml
new file mode 100644
index 0000000000000..9bf78e1209753
--- /dev/null
+++ b/docs/changelog/110630.yaml
@@ -0,0 +1,5 @@
+pr: 110630
+summary: Telemetry for inference adaptive allocations
+area: Machine Learning
+type: feature
+issues: []
diff --git a/docs/changelog/110633.yaml b/docs/changelog/110633.yaml
new file mode 100644
index 0000000000000..d4d1dc68cdbcc
--- /dev/null
+++ b/docs/changelog/110633.yaml
@@ -0,0 +1,5 @@
+pr: 110633
+summary: Add manage roles privilege
+area: Authorization
+type: enhancement
+issues: []
diff --git a/docs/changelog/110651.yaml b/docs/changelog/110651.yaml
deleted file mode 100644
index c25c63ee0284a..0000000000000
--- a/docs/changelog/110651.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 110651
-summary: "Remove `default_field: message` from metrics index templates"
-area: Data streams
-type: enhancement
-issues: []
diff --git a/docs/changelog/110665.yaml b/docs/changelog/110665.yaml
deleted file mode 100644
index fa6db3190fe60..0000000000000
--- a/docs/changelog/110665.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 110665
-summary: "[ESQL] Fix parsing of large magnitude negative numbers"
-area: ES|QL
-type: bug
-issues:
- - 104323
diff --git a/docs/changelog/110666.yaml b/docs/changelog/110666.yaml
deleted file mode 100644
index d96f8e2024c81..0000000000000
--- a/docs/changelog/110666.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 110666
-summary: Removing the use of Stream::peek from `GeoIpDownloader::cleanDatabases`
-area: Ingest Node
-type: bug
-issues: []
diff --git a/docs/changelog/110707.yaml b/docs/changelog/110707.yaml
deleted file mode 100644
index e13688c73c743..0000000000000
--- a/docs/changelog/110707.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 110707
-summary: Fix issue with returning incomplete fragment for plain highlighter
-area: Highlighting
-type: bug
-issues: []
diff --git a/docs/changelog/110710.yaml b/docs/changelog/110710.yaml
deleted file mode 100644
index bf3349ee25cdd..0000000000000
--- a/docs/changelog/110710.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 110710
-summary: Add a cluster listener to fix missing node features after upgrading from a version prior to 8.13
-area: Infra/Core
-type: bug
-issues:
- - 109254
diff --git a/docs/changelog/110734.yaml b/docs/changelog/110734.yaml
new file mode 100644
index 0000000000000..d6dce144b89cd
--- /dev/null
+++ b/docs/changelog/110734.yaml
@@ -0,0 +1,5 @@
+pr: 110734
+summary: Fix bug in ML serverless autoscaling which prevented trained model updates from triggering a scale up
+area: Machine Learning
+type: bug
+issues: [ ]
diff --git a/docs/changelog/110793.yaml b/docs/changelog/110793.yaml
deleted file mode 100644
index 8f1f3ba9afeb7..0000000000000
--- a/docs/changelog/110793.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
-pr: 110793
-summary: Fix for union-types for multiple columns with the same name
-area: ES|QL
-type: bug
-issues:
- - 110490
- - 109916
diff --git a/docs/changelog/110796.yaml b/docs/changelog/110796.yaml
new file mode 100644
index 0000000000000..a54a9a08bbd27
--- /dev/null
+++ b/docs/changelog/110796.yaml
@@ -0,0 +1,5 @@
+pr: 110796
+summary: Remove needless forking to GENERIC in `TransportMultiSearchAction`
+area: Search
+type: bug
+issues: []
diff --git a/docs/changelog/110816.yaml b/docs/changelog/110816.yaml
new file mode 100644
index 0000000000000..bf707376ec9ea
--- /dev/null
+++ b/docs/changelog/110816.yaml
@@ -0,0 +1,6 @@
+pr: 110816
+summary: GET _cluster/settings with include_defaults returns the expected fallback value if defined in elasticsearch.yml
+area: Infra/Settings
+type: bug
+issues:
+ - 110815
diff --git a/docs/changelog/110824.yaml b/docs/changelog/110824.yaml
deleted file mode 100644
index 4fe97d6692865..0000000000000
--- a/docs/changelog/110824.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 110824
-summary: "[ESQL] Count_distinct(_source) should return a 400"
-area: ES|QL
-type: bug
-issues: []
diff --git a/docs/changelog/110844.yaml b/docs/changelog/110844.yaml
deleted file mode 100644
index ea879f13f3e67..0000000000000
--- a/docs/changelog/110844.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 110844
-summary: Directly download commercial ip geolocation databases from providers
-area: Ingest Node
-type: feature
-issues: []
diff --git a/docs/changelog/110847.yaml b/docs/changelog/110847.yaml
new file mode 100644
index 0000000000000..214adc97ac7cb
--- /dev/null
+++ b/docs/changelog/110847.yaml
@@ -0,0 +1,5 @@
+pr: 110847
+summary: SLM Interval based scheduling
+area: ILM+SLM
+type: feature
+issues: []
diff --git a/docs/changelog/110901.yaml b/docs/changelog/110901.yaml
new file mode 100644
index 0000000000000..599cb7ce9ec98
--- /dev/null
+++ b/docs/changelog/110901.yaml
@@ -0,0 +1,15 @@
+pr: 110901
+summary: Set lenient to true by default when using updateable synonyms
+area: Analysis
+type: breaking
+issues: []
+breaking:
+ title: Set lenient to true by default when using updateable synonyms
+ area: Analysis
+ details: |
+ When a `synonym` or `synonym_graph` token filter is configured with `updateable: true`, the default `lenient`
+ value will now be `true`.
+ impact: |
+ `synonym` or `synonym_graph` token filters configured with `updateable: true` will ignore invalid synonyms by
+ default. This prevents shard initialization errors on invalid synonyms.
+ notable: true
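A sketch of a filter affected by this breaking change (the index name and synonyms set are hypothetical):

[source,console]
----
PUT /my-index
{
  "settings": {
    "analysis": {
      "filter": {
        "my_synonyms": {
          "type": "synonym_graph",
          "synonyms_set": "my-synonym-set",
          "updateable": true
        }
      },
      "analyzer": {
        "my_search_analyzer": {
          "tokenizer": "standard",
          "filter": [ "lowercase", "my_synonyms" ]
        }
      }
    }
  }
}
----

Because `updateable` is `true` and `lenient` is not set, this filter now defaults to `lenient: true` and skips unparseable rules instead of failing shard initialization; set `lenient: false` explicitly to restore the old strictness.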
diff --git a/docs/changelog/110906.yaml b/docs/changelog/110906.yaml
deleted file mode 100644
index 6123b1108fd17..0000000000000
--- a/docs/changelog/110906.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 110906
-summary: "Add comma before charset parameter in WWW-Authenticate response header"
-area: Authentication
-type: bug
-issues: []
diff --git a/docs/changelog/110922.yaml b/docs/changelog/110922.yaml
deleted file mode 100644
index 6a85ce57de103..0000000000000
--- a/docs/changelog/110922.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 110922
-summary: Speed up collecting zero document string terms
-area: Aggregations
-type: enhancement
-issues: []
diff --git a/docs/changelog/110927.yaml b/docs/changelog/110927.yaml
deleted file mode 100644
index 3602ce3e811fa..0000000000000
--- a/docs/changelog/110927.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 110927
-summary: Fix leak in collapsing search results
-area: Search
-type: bug
-issues: []
diff --git a/docs/changelog/110971.yaml b/docs/changelog/110971.yaml
new file mode 100644
index 0000000000000..3579f77dc0d1d
--- /dev/null
+++ b/docs/changelog/110971.yaml
@@ -0,0 +1,5 @@
+pr: 110971
+summary: "Search in ES|QL: Add MATCH operator"
+area: ES|QL
+type: feature
+issues: []
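A hedged sketch of the new operator in an ES|QL query; the exact grammar comes from PR 110971, and the index pattern and field are hypothetical:

[source,console]
----
POST /_query
{
  "query": """
    FROM logs-*
    | WHERE message MATCH "connection reset"
    | LIMIT 10
  """
}
----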
diff --git a/docs/changelog/110974.yaml b/docs/changelog/110974.yaml
new file mode 100644
index 0000000000000..c9e8c9b78675e
--- /dev/null
+++ b/docs/changelog/110974.yaml
@@ -0,0 +1,5 @@
+pr: 110974
+summary: Add custom rule parameters to force time shift
+area: Machine Learning
+type: enhancement
+issues: []
diff --git a/docs/changelog/110986.yaml b/docs/changelog/110986.yaml
new file mode 100644
index 0000000000000..4e320b19c9578
--- /dev/null
+++ b/docs/changelog/110986.yaml
@@ -0,0 +1,6 @@
+pr: 110986
+summary: Fix unnecessary mustache template evaluation
+area: Ingest Node
+type: enhancement
+issues:
+ - 110191
diff --git a/docs/changelog/110993.yaml b/docs/changelog/110993.yaml
new file mode 100644
index 0000000000000..9eb653a09e3a4
--- /dev/null
+++ b/docs/changelog/110993.yaml
@@ -0,0 +1,5 @@
+pr: 110993
+summary: Add link to Max Shards Per Node exception message
+area: Distributed
+type: enhancement
+issues: []
diff --git a/docs/changelog/111071.yaml b/docs/changelog/111071.yaml
new file mode 100644
index 0000000000000..5e8ab53db3d03
--- /dev/null
+++ b/docs/changelog/111071.yaml
@@ -0,0 +1,5 @@
+pr: 111071
+summary: Use native scalar scorer for int8_flat index
+area: Vector Search
+type: enhancement
+issues: []
diff --git a/docs/changelog/111091.yaml b/docs/changelog/111091.yaml
new file mode 100644
index 0000000000000..8444681a14a48
--- /dev/null
+++ b/docs/changelog/111091.yaml
@@ -0,0 +1,5 @@
+pr: 111091
+summary: "X-pack/plugin/otel: introduce x-pack-otel plugin"
+area: Data streams
+type: feature
+issues: []
diff --git a/docs/changelog/111105.yaml b/docs/changelog/111105.yaml
new file mode 100644
index 0000000000000..ed32bd1ef7fc3
--- /dev/null
+++ b/docs/changelog/111105.yaml
@@ -0,0 +1,5 @@
+pr: 111105
+summary: "ESQL: TOP aggregation IP support"
+area: ES|QL
+type: feature
+issues: []
diff --git a/docs/changelog/111118.yaml b/docs/changelog/111118.yaml
new file mode 100644
index 0000000000000..c9fe6cb443688
--- /dev/null
+++ b/docs/changelog/111118.yaml
@@ -0,0 +1,5 @@
+pr: 111118
+summary: "[ES|QL] Simplify patterns for subfields"
+area: ES|QL
+type: bug
+issues: []
diff --git a/docs/changelog/111123.yaml b/docs/changelog/111123.yaml
new file mode 100644
index 0000000000000..605b8607f4082
--- /dev/null
+++ b/docs/changelog/111123.yaml
@@ -0,0 +1,5 @@
+pr: 111123
+summary: Add Lucene segment-level fields stats
+area: Mapping
+type: enhancement
+issues: []
diff --git a/docs/changelog/111154.yaml b/docs/changelog/111154.yaml
new file mode 100644
index 0000000000000..3297f5005a811
--- /dev/null
+++ b/docs/changelog/111154.yaml
@@ -0,0 +1,5 @@
+pr: 111154
+summary: EIS (Elastic Inference Service) integration
+area: Inference
+type: feature
+issues: []
diff --git a/docs/changelog/111181.yaml b/docs/changelog/111181.yaml
new file mode 100644
index 0000000000000..7f9f5937b7652
--- /dev/null
+++ b/docs/changelog/111181.yaml
@@ -0,0 +1,5 @@
+pr: 111181
+summary: "[Inference API] Add Alibaba Cloud AI Search Model support to Inference API"
+area: Machine Learning
+type: enhancement
+issues: [ ]
diff --git a/docs/changelog/111193.yaml b/docs/changelog/111193.yaml
new file mode 100644
index 0000000000000..9e56facb60d3a
--- /dev/null
+++ b/docs/changelog/111193.yaml
@@ -0,0 +1,6 @@
+pr: 111193
+summary: Fix cases of collections with one point
+area: Geo
+type: bug
+issues:
+ - 110982
diff --git a/docs/changelog/111212.yaml b/docs/changelog/111212.yaml
new file mode 100644
index 0000000000000..67d1513b3ff6f
--- /dev/null
+++ b/docs/changelog/111212.yaml
@@ -0,0 +1,6 @@
+pr: 111212
+summary: Fix score count validation in reranker response
+area: Ranking
+type: bug
+issues:
+ - 111202
diff --git a/docs/changelog/111215.yaml b/docs/changelog/111215.yaml
new file mode 100644
index 0000000000000..dc044c2283fc4
--- /dev/null
+++ b/docs/changelog/111215.yaml
@@ -0,0 +1,6 @@
+pr: 111215
+summary: Make `SnapshotLifecycleStats` immutable so `SnapshotLifecycleMetadata.EMPTY`
+ isn't changed as side-effect
+area: ILM+SLM
+type: bug
+issues: []
diff --git a/docs/changelog/111225.yaml b/docs/changelog/111225.yaml
new file mode 100644
index 0000000000000..bcd344847cfd2
--- /dev/null
+++ b/docs/changelog/111225.yaml
@@ -0,0 +1,5 @@
+pr: 111225
+summary: Upgrade Azure SDK
+area: Snapshot/Restore
+type: upgrade
+issues: []
diff --git a/docs/changelog/111238.yaml b/docs/changelog/111238.yaml
new file mode 100644
index 0000000000000..b918b754ff595
--- /dev/null
+++ b/docs/changelog/111238.yaml
@@ -0,0 +1,6 @@
+pr: 111238
+summary: Fix validation of TEXT fields with case insensitive comparison
+area: EQL
+type: bug
+issues:
+ - 111235
diff --git a/docs/changelog/111245.yaml b/docs/changelog/111245.yaml
new file mode 100644
index 0000000000000..384373d52cb20
--- /dev/null
+++ b/docs/changelog/111245.yaml
@@ -0,0 +1,6 @@
+pr: 111245
+summary: Truncate watcher history if it is too large
+area: Watcher
+type: bug
+issues:
+ - 94745
diff --git a/docs/changelog/111274.yaml b/docs/changelog/111274.yaml
new file mode 100644
index 0000000000000..e26bcc03ce118
--- /dev/null
+++ b/docs/changelog/111274.yaml
@@ -0,0 +1,5 @@
+pr: 111274
+summary: Include account name in Azure settings exceptions
+area: Snapshot/Restore
+type: enhancement
+issues: []
diff --git a/docs/changelog/111284.yaml b/docs/changelog/111284.yaml
new file mode 100644
index 0000000000000..f87649a134af6
--- /dev/null
+++ b/docs/changelog/111284.yaml
@@ -0,0 +1,6 @@
+pr: 111284
+summary: Update `semantic_text` field to support indexing numeric and boolean data
+ types
+area: Mapping
+type: enhancement
+issues: []
diff --git a/docs/changelog/111285.yaml b/docs/changelog/111285.yaml
new file mode 100644
index 0000000000000..e4856482b4d6e
--- /dev/null
+++ b/docs/changelog/111285.yaml
@@ -0,0 +1,5 @@
+pr: 111285
+summary: "[Bugfix] Add `accessDeclaredMembers` permission to allow search application templates to parse floats"
+area: Relevance
+type: bug
+issues: []
diff --git a/docs/changelog/111311.yaml b/docs/changelog/111311.yaml
new file mode 100644
index 0000000000000..5786e11e885e2
--- /dev/null
+++ b/docs/changelog/111311.yaml
@@ -0,0 +1,6 @@
+pr: 111311
+summary: Add support for data streams with a match-all template
+area: Data streams
+type: bug
+issues:
+ - 111204
diff --git a/docs/changelog/111315.yaml b/docs/changelog/111315.yaml
new file mode 100644
index 0000000000000..0e2e56898b51c
--- /dev/null
+++ b/docs/changelog/111315.yaml
@@ -0,0 +1,5 @@
+pr: 111315
+summary: Add link to flood-stage watermark exception message
+area: Allocation
+type: enhancement
+issues: []
diff --git a/docs/changelog/111316.yaml b/docs/changelog/111316.yaml
new file mode 100644
index 0000000000000..0d915cd1ec3ea
--- /dev/null
+++ b/docs/changelog/111316.yaml
@@ -0,0 +1,5 @@
+pr: 111316
+summary: "[Service Account] Add `AutoOps` account"
+area: Security
+type: enhancement
+issues: []
diff --git a/docs/changelog/111344.yaml b/docs/changelog/111344.yaml
new file mode 100644
index 0000000000000..3d5988054749d
--- /dev/null
+++ b/docs/changelog/111344.yaml
@@ -0,0 +1,5 @@
+pr: 111344
+summary: Add support for Azure Managed Identity
+area: Snapshot/Restore
+type: enhancement
+issues: []
diff --git a/docs/changelog/111367.yaml b/docs/changelog/111367.yaml
new file mode 100644
index 0000000000000..89e6c1d3b4da4
--- /dev/null
+++ b/docs/changelog/111367.yaml
@@ -0,0 +1,5 @@
+pr: 111367
+summary: "ESQL: Add Values aggregation tests, fix `ConstantBytesRefBlock` memory handling"
+area: ES|QL
+type: bug
+issues: []
diff --git a/docs/changelog/111412.yaml b/docs/changelog/111412.yaml
new file mode 100644
index 0000000000000..297fa77cd2664
--- /dev/null
+++ b/docs/changelog/111412.yaml
@@ -0,0 +1,6 @@
+pr: 111412
+summary: Make enrich cache based on memory usage
+area: Ingest Node
+type: enhancement
+issues:
+ - 106081
diff --git a/docs/changelog/111420.yaml b/docs/changelog/111420.yaml
new file mode 100644
index 0000000000000..4e2640ac5762a
--- /dev/null
+++ b/docs/changelog/111420.yaml
@@ -0,0 +1,5 @@
+pr: 111420
+summary: "[Query rules] Add `exclude` query rule type"
+area: Relevance
+type: feature
+issues: []
diff --git a/docs/changelog/111437.yaml b/docs/changelog/111437.yaml
new file mode 100644
index 0000000000000..a50312ffdd1aa
--- /dev/null
+++ b/docs/changelog/111437.yaml
@@ -0,0 +1,5 @@
+pr: 111437
+summary: "[ES|QL] Create `Range` in `PushFiltersToSource` for qualified pushable filters on the same field"
+area: ES|QL
+type: enhancement
+issues: []
diff --git a/docs/changelog/111445.yaml b/docs/changelog/111445.yaml
new file mode 100644
index 0000000000000..9ba8e4371bd0c
--- /dev/null
+++ b/docs/changelog/111445.yaml
@@ -0,0 +1,5 @@
+pr: 111445
+summary: Support booleans in routing path
+area: TSDB
+type: enhancement
+issues: []
diff --git a/docs/changelog/111457.yaml b/docs/changelog/111457.yaml
new file mode 100644
index 0000000000000..f4ad4ee53eb0a
--- /dev/null
+++ b/docs/changelog/111457.yaml
@@ -0,0 +1,6 @@
+pr: 111457
+summary: Add support for boolean dimensions
+area: TSDB
+type: enhancement
+issues:
+ - 111338
diff --git a/docs/changelog/111475.yaml b/docs/changelog/111475.yaml
new file mode 100644
index 0000000000000..264c975444868
--- /dev/null
+++ b/docs/changelog/111475.yaml
@@ -0,0 +1,6 @@
+pr: 111475
+summary: "ESQL: Fix for overzealous validation in case of invalid mapped fields"
+area: ES|QL
+type: bug
+issues:
+ - 111452
diff --git a/docs/changelog/111490.yaml b/docs/changelog/111490.yaml
new file mode 100644
index 0000000000000..b67c16189cc62
--- /dev/null
+++ b/docs/changelog/111490.yaml
@@ -0,0 +1,5 @@
+pr: 111490
+summary: Temporarily return both `modelId` and `inferenceId` for GET /_inference until we migrate clients to only `inferenceId`
+area: Machine Learning
+type: bug
+issues: []
diff --git a/docs/changelog/111501.yaml b/docs/changelog/111501.yaml
new file mode 100644
index 0000000000000..a424142376e52
--- /dev/null
+++ b/docs/changelog/111501.yaml
@@ -0,0 +1,6 @@
+pr: 111501
+summary: "[ES|QL] Combine Disjunctive CIDRMatch"
+area: ES|QL
+type: enhancement
+issues:
+ - 105143
diff --git a/docs/changelog/111516.yaml b/docs/changelog/111516.yaml
new file mode 100644
index 0000000000000..96e8bd843f750
--- /dev/null
+++ b/docs/changelog/111516.yaml
@@ -0,0 +1,5 @@
+pr: 111516
+summary: Add support for `allow_partial_search_results` in PIT
+area: Search
+type: enhancement
+issues: []
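Presumably this surfaces as a query parameter on the open point-in-time API, mirroring the existing search option; a sketch (index name hypothetical):

[source,console]
----
POST /my-index/_pit?keep_alive=1m&allow_partial_search_results=true
----

Searches referencing the resulting PIT ID could then return partial results when some shards are unavailable, instead of failing outright.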
diff --git a/docs/changelog/111519.yaml b/docs/changelog/111519.yaml
new file mode 100644
index 0000000000000..8cc62fb8ed903
--- /dev/null
+++ b/docs/changelog/111519.yaml
@@ -0,0 +1,5 @@
+pr: 111519
+summary: "ESQL: Don't mutate the `BoolQueryBuilder` in plan"
+area: ES|QL
+type: bug
+issues: []
diff --git a/docs/changelog/111523.yaml b/docs/changelog/111523.yaml
new file mode 100644
index 0000000000000..202d16c5a426d
--- /dev/null
+++ b/docs/changelog/111523.yaml
@@ -0,0 +1,5 @@
+pr: 111523
+summary: Search coordinator uses `event.ingested` in cluster state to do rewrites
+area: Search
+type: enhancement
+issues: []
diff --git a/docs/changelog/111535.yaml b/docs/changelog/111535.yaml
new file mode 100644
index 0000000000000..4beebbf28d4e1
--- /dev/null
+++ b/docs/changelog/111535.yaml
@@ -0,0 +1,5 @@
+pr: 111535
+summary: Fix remote cluster credential secure settings reload
+area: Authorization
+type: bug
+issues: []
diff --git a/docs/changelog/111544.yaml b/docs/changelog/111544.yaml
new file mode 100644
index 0000000000000..d4c46f485e664
--- /dev/null
+++ b/docs/changelog/111544.yaml
@@ -0,0 +1,5 @@
+pr: 111544
+summary: "ESQL: Strings support for MAX and MIN aggregations"
+area: ES|QL
+type: feature
+issues: []
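A small ES|QL sketch of the new string support (index and field names are hypothetical):

[source,console]
----
POST /_query
{
  "query": """
    FROM employees
    | STATS first = MIN(last_name), last = MAX(last_name)
  """
}
----

Per this entry, MAX and MIN now also accept string values.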
diff --git a/docs/changelog/111552.yaml b/docs/changelog/111552.yaml
new file mode 100644
index 0000000000000..d9991788d4fa9
--- /dev/null
+++ b/docs/changelog/111552.yaml
@@ -0,0 +1,5 @@
+pr: 111552
+summary: Improve SIEM EA 9521 test
+area: ES|QL
+type: enhancement
+issues: []
diff --git a/docs/changelog/111576.yaml b/docs/changelog/111576.yaml
new file mode 100644
index 0000000000000..6d3c331f4bbd5
--- /dev/null
+++ b/docs/changelog/111576.yaml
@@ -0,0 +1,6 @@
+pr: 111576
+summary: Execute shard snapshot tasks in shard-id order
+area: Snapshot/Restore
+type: enhancement
+issues:
+ - 108739
diff --git a/docs/changelog/111600.yaml b/docs/changelog/111600.yaml
new file mode 100644
index 0000000000000..0c1e01e1c2e23
--- /dev/null
+++ b/docs/changelog/111600.yaml
@@ -0,0 +1,5 @@
+pr: 111600
+summary: Make ecs@mappings work with OTel attributes
+area: Data streams
+type: enhancement
+issues: []
diff --git a/docs/changelog/111624.yaml b/docs/changelog/111624.yaml
new file mode 100644
index 0000000000000..7b04b244ef7a7
--- /dev/null
+++ b/docs/changelog/111624.yaml
@@ -0,0 +1,6 @@
+pr: 111624
+summary: Extend logging for dropped warning headers
+area: Infra/Core
+type: enhancement
+issues:
+ - 90527
diff --git a/docs/changelog/111644.yaml b/docs/changelog/111644.yaml
new file mode 100644
index 0000000000000..3705d697c95e3
--- /dev/null
+++ b/docs/changelog/111644.yaml
@@ -0,0 +1,6 @@
+pr: 111644
+summary: Force using the last centroid during merging
+area: Aggregations
+type: bug
+issues:
+ - 111065
diff --git a/docs/changelog/111655.yaml b/docs/changelog/111655.yaml
new file mode 100644
index 0000000000000..077714d15a712
--- /dev/null
+++ b/docs/changelog/111655.yaml
@@ -0,0 +1,5 @@
+pr: 111655
+summary: Migrate Inference to `ChunkedToXContent`
+area: Machine Learning
+type: enhancement
+issues: []
diff --git a/docs/changelog/111673.yaml b/docs/changelog/111673.yaml
new file mode 100644
index 0000000000000..ebc211633fcab
--- /dev/null
+++ b/docs/changelog/111673.yaml
@@ -0,0 +1,5 @@
+pr: 111673
+summary: Properly handle filters on `TextSimilarityRank` retriever
+area: Ranking
+type: bug
+issues: []
diff --git a/docs/changelog/111683.yaml b/docs/changelog/111683.yaml
new file mode 100644
index 0000000000000..cbb2e5ad71ddc
--- /dev/null
+++ b/docs/changelog/111683.yaml
@@ -0,0 +1,6 @@
+pr: 111683
+summary: Only emit product origin in deprecation log if present
+area: Infra/Logging
+type: bug
+issues:
+ - 81757
diff --git a/docs/changelog/111689.yaml b/docs/changelog/111689.yaml
new file mode 100644
index 0000000000000..ccb3d4d4f87c5
--- /dev/null
+++ b/docs/changelog/111689.yaml
@@ -0,0 +1,6 @@
+pr: 111689
+summary: Add nanos support to `ZonedDateTime` serialization
+area: Infra/Core
+type: enhancement
+issues:
+ - 68292
diff --git a/docs/changelog/111690.yaml b/docs/changelog/111690.yaml
new file mode 100644
index 0000000000000..36e715744ad88
--- /dev/null
+++ b/docs/changelog/111690.yaml
@@ -0,0 +1,5 @@
+pr: 111690
+summary: "ESQL: Support INLINESTATS grouped on expressions"
+area: ES|QL
+type: enhancement
+issues: []
diff --git a/docs/changelog/111729.yaml b/docs/changelog/111729.yaml
new file mode 100644
index 0000000000000..c75c14a997da9
--- /dev/null
+++ b/docs/changelog/111729.yaml
@@ -0,0 +1,6 @@
+pr: 111729
+summary: Speed up dense/sparse vector stats
+area: Vector Search
+type: bug
+issues:
+ - 111715
diff --git a/docs/changelog/111740.yaml b/docs/changelog/111740.yaml
new file mode 100644
index 0000000000000..48b7ee200e45e
--- /dev/null
+++ b/docs/changelog/111740.yaml
@@ -0,0 +1,6 @@
+pr: 111740
+summary: Fix Start Trial API output acknowledgement header for features
+area: License
+type: bug
+issues:
+ - 111739
diff --git a/docs/changelog/111749.yaml b/docs/changelog/111749.yaml
new file mode 100644
index 0000000000000..77e0c65005dd6
--- /dev/null
+++ b/docs/changelog/111749.yaml
@@ -0,0 +1,6 @@
+pr: 111749
+summary: "ESQL: Added `mv_percentile` function"
+area: ES|QL
+type: feature
+issues:
+ - 111591
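A minimal sketch of the new function on a multivalued column (the literal row is illustrative):

[source,console]
----
POST /_query
{
  "query": """
    ROW values = [5, 5, 10, 12, 5000]
    | EVAL p95 = MV_PERCENTILE(values, 95)
  """
}
----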
diff --git a/docs/changelog/111756.yaml b/docs/changelog/111756.yaml
new file mode 100644
index 0000000000000..e58345dbe696a
--- /dev/null
+++ b/docs/changelog/111756.yaml
@@ -0,0 +1,6 @@
+pr: 111756
+summary: Fix `NullPointerException` when doing knn search on empty index without dims
+area: Vector Search
+type: bug
+issues:
+ - 111733
diff --git a/docs/changelog/111758.yaml b/docs/changelog/111758.yaml
new file mode 100644
index 0000000000000..c95cdf48bc8a7
--- /dev/null
+++ b/docs/changelog/111758.yaml
@@ -0,0 +1,6 @@
+pr: 111758
+summary: Revert "Avoid bucket copies in Aggs"
+area: Aggregations
+type: bug
+issues:
+ - 111679
diff --git a/docs/changelog/111779.yaml b/docs/changelog/111779.yaml
new file mode 100644
index 0000000000000..52c635490e1e4
--- /dev/null
+++ b/docs/changelog/111779.yaml
@@ -0,0 +1,7 @@
+pr: 111779
+summary: "ESQL: Fix serialization during `can_match`"
+area: ES|QL
+type: bug
+issues:
+ - 111701
+ - 111726
diff --git a/docs/changelog/111797.yaml b/docs/changelog/111797.yaml
new file mode 100644
index 0000000000000..00b793a19d9c3
--- /dev/null
+++ b/docs/changelog/111797.yaml
@@ -0,0 +1,6 @@
+pr: 111797
+summary: "ESQL: fix for missing indices error message"
+area: ES|QL
+type: bug
+issues:
+ - 111712
diff --git a/docs/changelog/111807.yaml b/docs/changelog/111807.yaml
new file mode 100644
index 0000000000000..97c5e58461c34
--- /dev/null
+++ b/docs/changelog/111807.yaml
@@ -0,0 +1,5 @@
+pr: 111807
+summary: Explain Function Score Query
+area: Search
+type: bug
+issues: []
diff --git a/docs/changelog/111809.yaml b/docs/changelog/111809.yaml
new file mode 100644
index 0000000000000..5a2f220e3a697
--- /dev/null
+++ b/docs/changelog/111809.yaml
@@ -0,0 +1,5 @@
+pr: 111809
+summary: Add Field caps support for Semantic Text
+area: Mapping
+type: enhancement
+issues: []
diff --git a/docs/changelog/111818.yaml b/docs/changelog/111818.yaml
new file mode 100644
index 0000000000000..c3a632861aae6
--- /dev/null
+++ b/docs/changelog/111818.yaml
@@ -0,0 +1,5 @@
+pr: 111818
+summary: Add tier preference to security index settings allowlist
+area: Security
+type: enhancement
+issues: []
diff --git a/docs/changelog/111840.yaml b/docs/changelog/111840.yaml
new file mode 100644
index 0000000000000..c40a9e2aef621
--- /dev/null
+++ b/docs/changelog/111840.yaml
@@ -0,0 +1,5 @@
+pr: 111840
+summary: "ESQL: Add async ID and `is_running` headers to ESQL async query"
+area: ES|QL
+type: feature
+issues: []
diff --git a/docs/changelog/111843.yaml b/docs/changelog/111843.yaml
new file mode 100644
index 0000000000000..c8b20036520f3
--- /dev/null
+++ b/docs/changelog/111843.yaml
@@ -0,0 +1,5 @@
+pr: 111843
+summary: Add maximum nested depth check to WKT parser
+area: Geo
+type: bug
+issues: []
diff --git a/docs/changelog/111855.yaml b/docs/changelog/111855.yaml
new file mode 100644
index 0000000000000..3f15e9c20135a
--- /dev/null
+++ b/docs/changelog/111855.yaml
@@ -0,0 +1,5 @@
+pr: 111855
+summary: "ESQL: Profile more timing information"
+area: ES|QL
+type: enhancement
+issues: []
diff --git a/docs/changelog/111863.yaml b/docs/changelog/111863.yaml
new file mode 100644
index 0000000000000..1724cd83f984b
--- /dev/null
+++ b/docs/changelog/111863.yaml
@@ -0,0 +1,6 @@
+pr: 111863
+summary: Fix incorrect bulk request `took` time
+area: Ingest Node
+type: bug
+issues:
+ - 111854
diff --git a/docs/changelog/111866.yaml b/docs/changelog/111866.yaml
new file mode 100644
index 0000000000000..34bf56da4dc9e
--- /dev/null
+++ b/docs/changelog/111866.yaml
@@ -0,0 +1,6 @@
+pr: 111866
+summary: Fix Windows memory locking
+area: Infra/Core
+type: bug
+issues:
+ - 111847
diff --git a/docs/changelog/111874.yaml b/docs/changelog/111874.yaml
new file mode 100644
index 0000000000000..26ec90aa6cd4c
--- /dev/null
+++ b/docs/changelog/111874.yaml
@@ -0,0 +1,8 @@
+pr: 111874
+summary: "ESQL: BUCKET: allow numerical spans as whole numbers"
+area: ES|QL
+type: enhancement
+issues:
+ - 104646
+ - 109340
+ - 105375
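A sketch of the relaxed argument handling, assuming the two-argument span form of BUCKET (index and field hypothetical):

[source,console]
----
POST /_query
{
  "query": """
    FROM employees
    | STATS avg_salary = AVG(salary) BY bucket = BUCKET(salary, 5000)
  """
}
----

Per this change, the span can be written as a whole number such as `5000` rather than requiring a floating-point literal.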
diff --git a/docs/changelog/111879.yaml b/docs/changelog/111879.yaml
new file mode 100644
index 0000000000000..b8c2111e1d286
--- /dev/null
+++ b/docs/changelog/111879.yaml
@@ -0,0 +1,6 @@
+pr: 111879
+summary: "ESQL: Have BUCKET generate friendlier intervals"
+area: ES|QL
+type: enhancement
+issues:
+ - 110916
diff --git a/docs/changelog/111915.yaml b/docs/changelog/111915.yaml
new file mode 100644
index 0000000000000..f64c45b82d10c
--- /dev/null
+++ b/docs/changelog/111915.yaml
@@ -0,0 +1,6 @@
+pr: 111915
+summary: Fix DLS & FLS sometimes being enforced when it is disabled
+area: Authorization
+type: bug
+issues:
+ - 94709
diff --git a/docs/changelog/111917.yaml b/docs/changelog/111917.yaml
new file mode 100644
index 0000000000000..0dc760d76a698
--- /dev/null
+++ b/docs/changelog/111917.yaml
@@ -0,0 +1,7 @@
+pr: 111917
+summary: "[ES|QL] Cast mixed numeric types to a common numeric type for Coalesce and\
+ \ In at Analyzer"
+area: ES|QL
+type: enhancement
+issues:
+ - 111486
diff --git a/docs/changelog/111932.yaml b/docs/changelog/111932.yaml
new file mode 100644
index 0000000000000..ce840ecebcff0
--- /dev/null
+++ b/docs/changelog/111932.yaml
@@ -0,0 +1,6 @@
+pr: 111932
+summary: Fix union-types where one index is missing the field
+area: ES|QL
+type: bug
+issues:
+ - 111912
diff --git a/docs/changelog/111937.yaml b/docs/changelog/111937.yaml
new file mode 100644
index 0000000000000..7d856e29d54c5
--- /dev/null
+++ b/docs/changelog/111937.yaml
@@ -0,0 +1,6 @@
+pr: 111937
+summary: Handle `BigInteger` in xcontent copy
+area: Infra/Core
+type: bug
+issues:
+ - 111812
diff --git a/docs/changelog/111943.yaml b/docs/changelog/111943.yaml
new file mode 100644
index 0000000000000..6b9f03ccee31c
--- /dev/null
+++ b/docs/changelog/111943.yaml
@@ -0,0 +1,6 @@
+pr: 111943
+summary: Fix synthetic source for empty nested objects
+area: Mapping
+type: bug
+issues:
+ - 111811
diff --git a/docs/changelog/111947.yaml b/docs/changelog/111947.yaml
new file mode 100644
index 0000000000000..0aff0b9c7b8be
--- /dev/null
+++ b/docs/changelog/111947.yaml
@@ -0,0 +1,5 @@
+pr: 111947
+summary: Improve performance of grok pattern cycle detection
+area: Ingest Node
+type: bug
+issues: []
diff --git a/docs/changelog/111948.yaml b/docs/changelog/111948.yaml
new file mode 100644
index 0000000000000..a3a592abaf1ca
--- /dev/null
+++ b/docs/changelog/111948.yaml
@@ -0,0 +1,5 @@
+pr: 111948
+summary: Upgrade xcontent to Jackson 2.17.0
+area: Infra/Core
+type: upgrade
+issues: []
diff --git a/docs/changelog/111950.yaml b/docs/changelog/111950.yaml
new file mode 100644
index 0000000000000..3f23c17d8e652
--- /dev/null
+++ b/docs/changelog/111950.yaml
@@ -0,0 +1,6 @@
+pr: 111950
+summary: "[ES|QL] Name parameter with leading underscore"
+area: ES|QL
+type: enhancement
+issues:
+ - 111821
diff --git a/docs/changelog/111955.yaml b/docs/changelog/111955.yaml
new file mode 100644
index 0000000000000..ebc518203b7cc
--- /dev/null
+++ b/docs/changelog/111955.yaml
@@ -0,0 +1,7 @@
+pr: 111955
+summary: Clean up dangling S3 multipart uploads
+area: Snapshot/Restore
+type: enhancement
+issues:
+ - 101169
+ - 44971
diff --git a/docs/changelog/111966.yaml b/docs/changelog/111966.yaml
new file mode 100644
index 0000000000000..facf0a61c4d8a
--- /dev/null
+++ b/docs/changelog/111966.yaml
@@ -0,0 +1,5 @@
+pr: 111966
+summary: No error when `store_array_source` is used without synthetic source
+area: Mapping
+type: bug
+issues: []
diff --git a/docs/changelog/111968.yaml b/docs/changelog/111968.yaml
new file mode 100644
index 0000000000000..9d758c76369e9
--- /dev/null
+++ b/docs/changelog/111968.yaml
@@ -0,0 +1,6 @@
+pr: 111968
+summary: "ESQL: don't lose the original casting error message"
+area: ES|QL
+type: bug
+issues:
+ - 111967
diff --git a/docs/changelog/111969.yaml b/docs/changelog/111969.yaml
new file mode 100644
index 0000000000000..2d276850c4988
--- /dev/null
+++ b/docs/changelog/111969.yaml
@@ -0,0 +1,5 @@
+pr: 111969
+summary: "[Profiling] add `container.id` field to event index template"
+area: Application
+type: enhancement
+issues: []
diff --git a/docs/changelog/111972.yaml b/docs/changelog/111972.yaml
new file mode 100644
index 0000000000000..58477c68f0e7c
--- /dev/null
+++ b/docs/changelog/111972.yaml
@@ -0,0 +1,15 @@
+pr: 111972
+summary: Introduce global retention in data stream lifecycle
+area: Data streams
+type: feature
+issues: []
+highlight:
+ title: Add global retention in data stream lifecycle
+ body: "Data stream lifecycle now supports configuring retention on a cluster level,\
+ \ namely global retention. Global retention \nallows us to configure two different\
+ \ retentions:\n\n- `data_streams.lifecycle.retention.default` is applied to all\
+ \ data streams managed by the data stream lifecycle that do not have retention\n\
+ defined on the data stream level.\n- `data_streams.lifecycle.retention.max` is\
+ \ applied to all data streams managed by the data stream lifecycle and it allows\
+ \ any data stream \ndata to be deleted after the `max_retention` has passed."
+ notable: true
diff --git a/docs/changelog/111983.yaml b/docs/changelog/111983.yaml
new file mode 100644
index 0000000000000..d5043d0b44155
--- /dev/null
+++ b/docs/changelog/111983.yaml
@@ -0,0 +1,6 @@
+pr: 111983
+summary: Avoid losing error message in failure collector
+area: ES|QL
+type: bug
+issues:
+ - 111894
diff --git a/docs/changelog/111994.yaml b/docs/changelog/111994.yaml
new file mode 100644
index 0000000000000..ee62651c43987
--- /dev/null
+++ b/docs/changelog/111994.yaml
@@ -0,0 +1,6 @@
+pr: 111994
+summary: Merge multiple ignored source entries for the same field
+area: Logs
+type: bug
+issues:
+ - 111694
diff --git a/docs/changelog/112005.yaml b/docs/changelog/112005.yaml
new file mode 100644
index 0000000000000..2d84381e632b3
--- /dev/null
+++ b/docs/changelog/112005.yaml
@@ -0,0 +1,6 @@
+pr: 112005
+summary: Check for valid `parentDoc` before retrieving its previous
+area: Mapping
+type: bug
+issues:
+ - 111990
diff --git a/docs/changelog/112019.yaml b/docs/changelog/112019.yaml
new file mode 100644
index 0000000000000..7afb207864ed7
--- /dev/null
+++ b/docs/changelog/112019.yaml
@@ -0,0 +1,5 @@
+pr: 112019
+summary: Display effective retention in the relevant data stream APIs
+area: Data streams
+type: enhancement
+issues: []
diff --git a/docs/changelog/112038.yaml b/docs/changelog/112038.yaml
new file mode 100644
index 0000000000000..6cbfb373b7420
--- /dev/null
+++ b/docs/changelog/112038.yaml
@@ -0,0 +1,6 @@
+pr: 112038
+summary: Semantic reranking should fail whenever inference ID does not exist
+area: Relevance
+type: bug
+issues:
+ - 111934
diff --git a/docs/changelog/112046.yaml b/docs/changelog/112046.yaml
new file mode 100644
index 0000000000000..f3cda1ed7a7d2
--- /dev/null
+++ b/docs/changelog/112046.yaml
@@ -0,0 +1,5 @@
+pr: 112046
+summary: Fix calculation of parent offset for ignored source in some cases
+area: Mapping
+type: bug
+issues: []
diff --git a/docs/changelog/112058.yaml b/docs/changelog/112058.yaml
new file mode 100644
index 0000000000000..e974b3413582e
--- /dev/null
+++ b/docs/changelog/112058.yaml
@@ -0,0 +1,5 @@
+pr: 112058
+summary: Fix RRF validation for `rank_constant` < 1
+area: Ranking
+type: bug
+issues: []
diff --git a/docs/changelog/112066.yaml b/docs/changelog/112066.yaml
new file mode 100644
index 0000000000000..5dd846766bc8e
--- /dev/null
+++ b/docs/changelog/112066.yaml
@@ -0,0 +1,6 @@
+pr: 112066
+summary: Do not treat a replica as unassigned if the primary was recently created and
+  the unassigned time is below a threshold
+area: Health
+type: enhancement
+issues: []
diff --git a/docs/changelog/112090.yaml b/docs/changelog/112090.yaml
new file mode 100644
index 0000000000000..6d6e4d0851523
--- /dev/null
+++ b/docs/changelog/112090.yaml
@@ -0,0 +1,6 @@
+pr: 112090
+summary: Always check `crsType` when folding spatial functions
+area: Geo
+type: bug
+issues:
+ - 112089
diff --git a/docs/changelog/112100.yaml b/docs/changelog/112100.yaml
new file mode 100644
index 0000000000000..9135edecb4d77
--- /dev/null
+++ b/docs/changelog/112100.yaml
@@ -0,0 +1,5 @@
+pr: 112100
+summary: Exclude internal data streams from global retention
+area: Data streams
+type: bug
+issues: []
diff --git a/docs/changelog/112123.yaml b/docs/changelog/112123.yaml
new file mode 100644
index 0000000000000..0c0d7ac44cd17
--- /dev/null
+++ b/docs/changelog/112123.yaml
@@ -0,0 +1,5 @@
+pr: 112123
+summary: SLM interval schedule follow-up - add back `getFieldName`-style getters
+area: ILM+SLM
+type: enhancement
+issues: []
diff --git a/docs/changelog/112126.yaml b/docs/changelog/112126.yaml
new file mode 100644
index 0000000000000..f6a7aeb893a5e
--- /dev/null
+++ b/docs/changelog/112126.yaml
@@ -0,0 +1,5 @@
+pr: 112126
+summary: Add support for spatial relationships in point field mapper
+area: Geo
+type: enhancement
+issues: []
diff --git a/docs/changelog/112133.yaml b/docs/changelog/112133.yaml
new file mode 100644
index 0000000000000..11109402b7373
--- /dev/null
+++ b/docs/changelog/112133.yaml
@@ -0,0 +1,5 @@
+pr: 112133
+summary: Add telemetry for repository usage
+area: Snapshot/Restore
+type: enhancement
+issues: []
diff --git a/docs/changelog/112139.yaml b/docs/changelog/112139.yaml
new file mode 100644
index 0000000000000..d6d992ec1dcf2
--- /dev/null
+++ b/docs/changelog/112139.yaml
@@ -0,0 +1,6 @@
+pr: 112139
+summary: Fix NPE when executing doc value queries over shape geometries with empty
+ segments
+area: Geo
+type: bug
+issues: []
diff --git a/docs/changelog/112151.yaml b/docs/changelog/112151.yaml
new file mode 100644
index 0000000000000..f5cbfd8da07c2
--- /dev/null
+++ b/docs/changelog/112151.yaml
@@ -0,0 +1,5 @@
+pr: 112151
+summary: Store original source for keywords using a normalizer
+area: Logs
+type: enhancement
+issues: []
diff --git a/docs/changelog/112173.yaml b/docs/changelog/112173.yaml
new file mode 100644
index 0000000000000..9a43b0d1bf1fa
--- /dev/null
+++ b/docs/changelog/112173.yaml
@@ -0,0 +1,7 @@
+pr: 112173
+summary: Prevent synthetic field loaders accessing stored fields from using stale
+ data
+area: Mapping
+type: bug
+issues:
+ - 112156
diff --git a/docs/changelog/112178.yaml b/docs/changelog/112178.yaml
new file mode 100644
index 0000000000000..f1011291542b8
--- /dev/null
+++ b/docs/changelog/112178.yaml
@@ -0,0 +1,6 @@
+pr: 112178
+summary: Avoid wrapping rejection exception in exchange
+area: ES|QL
+type: bug
+issues:
+ - 112106
diff --git a/docs/changelog/112199.yaml b/docs/changelog/112199.yaml
new file mode 100644
index 0000000000000..eb22f215f9828
--- /dev/null
+++ b/docs/changelog/112199.yaml
@@ -0,0 +1,5 @@
+pr: 112199
+summary: Support doc-values-only queries on shape fields
+area: Geo
+type: enhancement
+issues: []
diff --git a/docs/changelog/112200.yaml b/docs/changelog/112200.yaml
new file mode 100644
index 0000000000000..0c2c3d71e3ddf
--- /dev/null
+++ b/docs/changelog/112200.yaml
@@ -0,0 +1,6 @@
+pr: 112200
+summary: "ES|QL: better validation of GROK patterns"
+area: ES|QL
+type: bug
+issues:
+ - 112111
diff --git a/docs/changelog/112214.yaml b/docs/changelog/112214.yaml
new file mode 100644
index 0000000000000..430f95a72bb3f
--- /dev/null
+++ b/docs/changelog/112214.yaml
@@ -0,0 +1,5 @@
+pr: 112214
+summary: '`ByteArrayStreamInput`: Return -1 when there are no more bytes to read'
+area: Infra/Core
+type: bug
+issues: []
diff --git a/docs/changelog/112217.yaml b/docs/changelog/112217.yaml
new file mode 100644
index 0000000000000..bb367d6128001
--- /dev/null
+++ b/docs/changelog/112217.yaml
@@ -0,0 +1,5 @@
+pr: 112217
+summary: Fix template alias parsing livelock
+area: Indices APIs
+type: bug
+issues: []
diff --git a/docs/changelog/112218.yaml b/docs/changelog/112218.yaml
new file mode 100644
index 0000000000000..c426dd7ade4ed
--- /dev/null
+++ b/docs/changelog/112218.yaml
@@ -0,0 +1,9 @@
+pr: 112218
+summary: "ESQL: Fix a bug in `MV_PERCENTILE`"
+area: ES|QL
+type: bug
+issues:
+ - 112193
+ - 112180
+ - 112187
+ - 112188
diff --git a/docs/changelog/112226.yaml b/docs/changelog/112226.yaml
new file mode 100644
index 0000000000000..ac36c0c0fe4e2
--- /dev/null
+++ b/docs/changelog/112226.yaml
@@ -0,0 +1,6 @@
+pr: 112226
+summary: "Fix \"unexpected field [remote_cluster]\" for CCS (RCS 1.0) when using API\
+ \ key that references `remote_cluster`"
+area: Security
+type: bug
+issues: []
diff --git a/docs/changelog/112230.yaml b/docs/changelog/112230.yaml
new file mode 100644
index 0000000000000..ef12dc3f78267
--- /dev/null
+++ b/docs/changelog/112230.yaml
@@ -0,0 +1,5 @@
+pr: 112230
+summary: Fix connection timeout for `OpenIdConnectAuthenticator` get Userinfo
+area: Security
+type: bug
+issues: []
diff --git a/docs/changelog/112242.yaml b/docs/changelog/112242.yaml
new file mode 100644
index 0000000000000..7292a00166de2
--- /dev/null
+++ b/docs/changelog/112242.yaml
@@ -0,0 +1,5 @@
+pr: 112242
+summary: Fix `toReleaseVersion()` when called on the current version ID
+area: Infra/Core
+type: bug
+issues: [111900]
diff --git a/docs/changelog/112260.yaml b/docs/changelog/112260.yaml
new file mode 100644
index 0000000000000..3f5642188a367
--- /dev/null
+++ b/docs/changelog/112260.yaml
@@ -0,0 +1,6 @@
+pr: 112260
+summary: Fix DLS over Runtime Fields
+area: "Authorization"
+type: bug
+issues:
+ - 111637
diff --git a/docs/changelog/112270.yaml b/docs/changelog/112270.yaml
new file mode 100644
index 0000000000000..1e6b9c7fc9290
--- /dev/null
+++ b/docs/changelog/112270.yaml
@@ -0,0 +1,5 @@
+pr: 112270
+summary: Support sparse embedding models in the elasticsearch inference service
+area: Machine Learning
+type: enhancement
+issues: []
diff --git a/docs/changelog/112273.yaml b/docs/changelog/112273.yaml
new file mode 100644
index 0000000000000..3182a1884a145
--- /dev/null
+++ b/docs/changelog/112273.yaml
@@ -0,0 +1,5 @@
+pr: 112273
+summary: "[Inference API] Add docs for Alibaba Cloud AI Search support for the Inference API"
+area: Machine Learning
+type: enhancement
+issues: [ ]
diff --git a/docs/changelog/112277.yaml b/docs/changelog/112277.yaml
new file mode 100644
index 0000000000000..eac474555999a
--- /dev/null
+++ b/docs/changelog/112277.yaml
@@ -0,0 +1,5 @@
+pr: 112277
+summary: Upgrade `repository-azure` dependencies
+area: Snapshot/Restore
+type: upgrade
+issues: []
diff --git a/docs/changelog/112320.yaml b/docs/changelog/112320.yaml
new file mode 100644
index 0000000000000..d35a08dfa4e91
--- /dev/null
+++ b/docs/changelog/112320.yaml
@@ -0,0 +1,5 @@
+pr: 112320
+summary: Upgrade xcontent to Jackson 2.17.2
+area: Infra/Core
+type: upgrade
+issues: []
diff --git a/docs/changelog/112341.yaml b/docs/changelog/112341.yaml
new file mode 100644
index 0000000000000..8f44b53ad9998
--- /dev/null
+++ b/docs/changelog/112341.yaml
@@ -0,0 +1,5 @@
+pr: 112341
+summary: Fix DLS using runtime fields and synthetic source
+area: Authorization
+type: bug
+issues: []
diff --git a/docs/reference/aggregations/metrics/scripted-metric-aggregation.asciidoc b/docs/reference/aggregations/metrics/scripted-metric-aggregation.asciidoc
index d7d837b2f8364..16879450c65d8 100644
--- a/docs/reference/aggregations/metrics/scripted-metric-aggregation.asciidoc
+++ b/docs/reference/aggregations/metrics/scripted-metric-aggregation.asciidoc
@@ -6,6 +6,8 @@
A metric aggregation that executes using scripts to provide a metric output.
+WARNING: `scripted_metric` is not available in {serverless-full}.
+
WARNING: Using scripts can result in slower search speeds. See
<>.
@@ -127,7 +129,7 @@ init_script:: Executed prior to any collection of documents. Allows the ag
+
In the above example, the `init_script` creates an array `transactions` in the `state` object.
-map_script:: Executed once per document collected. This is a required script.
+map_script:: Executed once per document collected. This is a required script.
+
In the above example, the `map_script` checks the value of the type field. If the value is 'sale' the value of the amount field
is added to the transactions array. If the value of the type field is not 'sale' the negated value of the amount field is added
@@ -282,4 +284,4 @@ params:: Optional. An object whose contents will be passed as variable
If a parent bucket of the scripted metric aggregation does not collect any documents an empty aggregation response will be returned from the
shard with a `null` value. In this case the `reduce_script`'s `states` variable will contain `null` as a response from that shard.
-`reduce_script`'s should therefore expect and deal with `null` responses from shards.
+The `reduce_script` should therefore expect and deal with `null` responses from shards.
diff --git a/docs/reference/analysis/tokenfilters/synonym-graph-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/synonym-graph-tokenfilter.asciidoc
index e37118019a55c..f0fa4f30fd83f 100644
--- a/docs/reference/analysis/tokenfilters/synonym-graph-tokenfilter.asciidoc
+++ b/docs/reference/analysis/tokenfilters/synonym-graph-tokenfilter.asciidoc
@@ -87,8 +87,8 @@ changes to synonym files. Only to be used for search analyzers.
* `expand` (defaults to `true`).
Expands definitions for equivalent synonym rules.
See <>.
-* `lenient` (defaults to `false`).
-If `true` ignores errors while parsing the synonym configuration.
+* `lenient` (defaults to the value of the `updateable` setting).
+If `true` ignores errors while parsing the synonym rules.
It is important to note that only those synonym rules which cannot get parsed are ignored.
See <> for an example of `lenient` behaviour for invalid synonym rules.
@@ -181,11 +181,11 @@ This can cause errors on the synonym rule.
[WARNING]
====
-Invalid synonym rules can cause errors when applying analyzer changes.
+If `lenient` is set to `false`, invalid synonym rules can cause errors when applying analyzer changes.
For reloadable analyzers, this prevents reloading and applying changes.
You must correct errors in the synonym rules and reload the analyzer.
-An index with invalid synonym rules cannot be reopened, making it inoperable when:
+When `lenient` is set to `false`, an index with invalid synonym rules cannot be reopened, making it inoperable when:
* A node containing the index starts
* The index is opened from a closed state
diff --git a/docs/reference/analysis/tokenfilters/synonym-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/synonym-tokenfilter.asciidoc
index 1658f016db60b..b0020a1120fc0 100644
--- a/docs/reference/analysis/tokenfilters/synonym-tokenfilter.asciidoc
+++ b/docs/reference/analysis/tokenfilters/synonym-tokenfilter.asciidoc
@@ -75,8 +75,8 @@ changes to synonym files. Only to be used for search analyzers.
* `expand` (defaults to `true`).
Expands definitions for equivalent synonym rules.
See <>.
-* `lenient` (defaults to `false`).
-If `true` ignores errors while parsing the synonym configuration.
+* `lenient` (defaults to the value of the `updateable` setting).
+If `true` ignores errors while parsing the synonym rules.
It is important to note that only those synonym rules which cannot get parsed are ignored.
See <> for an example of `lenient` behaviour for invalid synonym rules.
@@ -169,11 +169,11 @@ This can cause errors on the synonym rule.
[WARNING]
====
-Invalid synonym rules can cause errors when applying analyzer changes.
+If `lenient` is set to `false`, invalid synonym rules can cause errors when applying analyzer changes.
For reloadable analyzers, this prevents reloading and applying changes.
You must correct errors in the synonym rules and reload the analyzer.
-An index with invalid synonym rules cannot be reopened, making it inoperable when:
+When `lenient` is set to `false`, an index with invalid synonym rules cannot be reopened, making it inoperable when:
* A node containing the index starts
* The index is opened from a closed state
diff --git a/docs/reference/api-conventions.asciidoc b/docs/reference/api-conventions.asciidoc
index 25881b707d724..f8d925945401e 100644
--- a/docs/reference/api-conventions.asciidoc
+++ b/docs/reference/api-conventions.asciidoc
@@ -334,6 +334,7 @@ All REST API parameters (both request parameters and JSON body) support
providing boolean "false" as the value `false` and boolean "true" as the
value `true`. All other values will raise an error.
+[[api-conventions-number-values]]
[discrete]
=== Number Values
diff --git a/docs/reference/autoscaling/apis/autoscaling-apis.asciidoc b/docs/reference/autoscaling/apis/autoscaling-apis.asciidoc
index 090eda5ef5436..e4da2c45ee978 100644
--- a/docs/reference/autoscaling/apis/autoscaling-apis.asciidoc
+++ b/docs/reference/autoscaling/apis/autoscaling-apis.asciidoc
@@ -4,7 +4,7 @@
NOTE: {cloud-only}
-You can use the following APIs to perform autoscaling operations.
+You can use the following APIs to perform {cloud}/ec-autoscaling.html[autoscaling operations].
[discrete]
[[autoscaling-api-top-level]]
diff --git a/docs/reference/autoscaling/apis/delete-autoscaling-policy.asciidoc b/docs/reference/autoscaling/apis/delete-autoscaling-policy.asciidoc
index 608b7bd7cb903..190428485a003 100644
--- a/docs/reference/autoscaling/apis/delete-autoscaling-policy.asciidoc
+++ b/docs/reference/autoscaling/apis/delete-autoscaling-policy.asciidoc
@@ -7,7 +7,7 @@
NOTE: {cloud-only}
-Delete autoscaling policy.
+Delete {cloud}/ec-autoscaling.html[autoscaling] policy.
[[autoscaling-delete-autoscaling-policy-request]]
==== {api-request-title}
diff --git a/docs/reference/autoscaling/apis/get-autoscaling-capacity.asciidoc b/docs/reference/autoscaling/apis/get-autoscaling-capacity.asciidoc
index 05724b9c48b6e..d635d8c8f7bd0 100644
--- a/docs/reference/autoscaling/apis/get-autoscaling-capacity.asciidoc
+++ b/docs/reference/autoscaling/apis/get-autoscaling-capacity.asciidoc
@@ -7,7 +7,7 @@
NOTE: {cloud-only}
-Get autoscaling capacity.
+Get {cloud}/ec-autoscaling.html[autoscaling] capacity.
[[autoscaling-get-autoscaling-capacity-request]]
==== {api-request-title}
diff --git a/docs/reference/autoscaling/apis/get-autoscaling-policy.asciidoc b/docs/reference/autoscaling/apis/get-autoscaling-policy.asciidoc
index ad00d69d1aeb2..973eedcb361c9 100644
--- a/docs/reference/autoscaling/apis/get-autoscaling-policy.asciidoc
+++ b/docs/reference/autoscaling/apis/get-autoscaling-policy.asciidoc
@@ -7,7 +7,7 @@
NOTE: {cloud-only}
-Get autoscaling policy.
+Get {cloud}/ec-autoscaling.html[autoscaling] policy.
[[autoscaling-get-autoscaling-policy-request]]
==== {api-request-title}
diff --git a/docs/reference/autoscaling/apis/put-autoscaling-policy.asciidoc b/docs/reference/autoscaling/apis/put-autoscaling-policy.asciidoc
index ff79def51ebb9..e564f83411eb4 100644
--- a/docs/reference/autoscaling/apis/put-autoscaling-policy.asciidoc
+++ b/docs/reference/autoscaling/apis/put-autoscaling-policy.asciidoc
@@ -7,7 +7,7 @@
NOTE: {cloud-only}
-Creates or updates an autoscaling policy.
+Creates or updates an {cloud}/ec-autoscaling.html[autoscaling] policy.
[[autoscaling-put-autoscaling-policy-request]]
==== {api-request-title}
diff --git a/docs/reference/autoscaling/deciders/fixed-decider.asciidoc b/docs/reference/autoscaling/deciders/fixed-decider.asciidoc
index c46d1dffe2cc8..5a8b009d9f063 100644
--- a/docs/reference/autoscaling/deciders/fixed-decider.asciidoc
+++ b/docs/reference/autoscaling/deciders/fixed-decider.asciidoc
@@ -6,7 +6,7 @@ experimental[]
[WARNING]
The fixed decider is intended for testing only. Do not use this decider in production.
-The `fixed` decider responds with a fixed required capacity. It is not enabled
+The {cloud}/ec-autoscaling.html[autoscaling] `fixed` decider responds with a fixed required capacity. It is not enabled
by default but can be enabled for any policy by explicitly configuring it.
==== Configuration settings
diff --git a/docs/reference/autoscaling/deciders/frozen-existence-decider.asciidoc b/docs/reference/autoscaling/deciders/frozen-existence-decider.asciidoc
index 832cf330053aa..0fc9ad444a213 100644
--- a/docs/reference/autoscaling/deciders/frozen-existence-decider.asciidoc
+++ b/docs/reference/autoscaling/deciders/frozen-existence-decider.asciidoc
@@ -2,7 +2,7 @@
[[autoscaling-frozen-existence-decider]]
=== Frozen existence decider
-The frozen existence decider (`frozen_existence`) ensures that once the first
+The {cloud}/ec-autoscaling.html[autoscaling] frozen existence decider (`frozen_existence`) ensures that once the first
index enters the frozen ILM phase, the frozen tier is scaled into existence.
The frozen existence decider is enabled for all policies governing frozen data
diff --git a/docs/reference/autoscaling/deciders/frozen-shards-decider.asciidoc b/docs/reference/autoscaling/deciders/frozen-shards-decider.asciidoc
index ab11da04c8642..1977f95797ef0 100644
--- a/docs/reference/autoscaling/deciders/frozen-shards-decider.asciidoc
+++ b/docs/reference/autoscaling/deciders/frozen-shards-decider.asciidoc
@@ -2,7 +2,7 @@
[[autoscaling-frozen-shards-decider]]
=== Frozen shards decider
-The frozen shards decider (`frozen_shards`) calculates the memory required to search
+The {cloud}/ec-autoscaling.html[autoscaling] frozen shards decider (`frozen_shards`) calculates the memory required to search
the current set of partially mounted indices in the frozen tier. Based on a
required memory amount per shard, it calculates the necessary memory in the frozen tier.
diff --git a/docs/reference/autoscaling/deciders/frozen-storage-decider.asciidoc b/docs/reference/autoscaling/deciders/frozen-storage-decider.asciidoc
index 5a10f31f1365b..3a8e7cdb518b3 100644
--- a/docs/reference/autoscaling/deciders/frozen-storage-decider.asciidoc
+++ b/docs/reference/autoscaling/deciders/frozen-storage-decider.asciidoc
@@ -2,7 +2,7 @@
[[autoscaling-frozen-storage-decider]]
=== Frozen storage decider
-The frozen storage decider (`frozen_storage`) calculates the local storage
+The {cloud}/ec-autoscaling.html[autoscaling] frozen storage decider (`frozen_storage`) calculates the local storage
required to search the current set of partially mounted indices based on a
percentage of the total data set size of such indices. It signals that
additional storage capacity is necessary when existing capacity is less than the
diff --git a/docs/reference/autoscaling/deciders/machine-learning-decider.asciidoc b/docs/reference/autoscaling/deciders/machine-learning-decider.asciidoc
index 26ced6ad7bb26..5432d96a47edb 100644
--- a/docs/reference/autoscaling/deciders/machine-learning-decider.asciidoc
+++ b/docs/reference/autoscaling/deciders/machine-learning-decider.asciidoc
@@ -2,7 +2,7 @@
[[autoscaling-machine-learning-decider]]
=== Machine learning decider
-The {ml} decider (`ml`) calculates the memory and CPU requirements to run {ml}
+The {cloud}/ec-autoscaling.html[autoscaling] {ml} decider (`ml`) calculates the memory and CPU requirements to run {ml}
jobs and trained models.
The {ml} decider is enabled for policies governing `ml` nodes.
diff --git a/docs/reference/autoscaling/deciders/proactive-storage-decider.asciidoc b/docs/reference/autoscaling/deciders/proactive-storage-decider.asciidoc
index 763f1de96f6b9..33c989f3b12eb 100644
--- a/docs/reference/autoscaling/deciders/proactive-storage-decider.asciidoc
+++ b/docs/reference/autoscaling/deciders/proactive-storage-decider.asciidoc
@@ -2,7 +2,7 @@
[[autoscaling-proactive-storage-decider]]
=== Proactive storage decider
-The proactive storage decider (`proactive_storage`) calculates the storage required to contain
+The {cloud}/ec-autoscaling.html[autoscaling] proactive storage decider (`proactive_storage`) calculates the storage required to contain
the current data set plus an estimated amount of expected additional data.
The proactive storage decider is enabled for all policies governing nodes with the `data_hot` role.
diff --git a/docs/reference/autoscaling/deciders/reactive-storage-decider.asciidoc b/docs/reference/autoscaling/deciders/reactive-storage-decider.asciidoc
index 50897178a88de..7c38df75169fd 100644
--- a/docs/reference/autoscaling/deciders/reactive-storage-decider.asciidoc
+++ b/docs/reference/autoscaling/deciders/reactive-storage-decider.asciidoc
@@ -2,7 +2,7 @@
[[autoscaling-reactive-storage-decider]]
=== Reactive storage decider
-The reactive storage decider (`reactive_storage`) calculates the storage required to contain
+The {cloud}/ec-autoscaling.html[autoscaling] reactive storage decider (`reactive_storage`) calculates the storage required to contain
the current data set. It signals that additional storage capacity is necessary
when existing capacity has been exceeded (reactively).
diff --git a/docs/reference/autoscaling/index.asciidoc b/docs/reference/autoscaling/index.asciidoc
index fbf1a9536973e..e70c464889419 100644
--- a/docs/reference/autoscaling/index.asciidoc
+++ b/docs/reference/autoscaling/index.asciidoc
@@ -4,7 +4,7 @@
NOTE: {cloud-only}
-The autoscaling feature enables an operator to configure tiers of nodes that
+The {cloud}/ec-autoscaling.html[autoscaling] feature enables an operator to configure tiers of nodes that
self-monitor whether or not they need to scale based on an operator-defined
policy. Then, via the autoscaling API, an Elasticsearch cluster can report
whether or not it needs additional resources to meet the policy. For example, an
diff --git a/docs/reference/behavioral-analytics/apis/delete-analytics-collection.asciidoc b/docs/reference/behavioral-analytics/apis/delete-analytics-collection.asciidoc
index 9b15bcca3fc85..a6894a933b460 100644
--- a/docs/reference/behavioral-analytics/apis/delete-analytics-collection.asciidoc
+++ b/docs/reference/behavioral-analytics/apis/delete-analytics-collection.asciidoc
@@ -17,7 +17,7 @@ PUT _application/analytics/my_analytics_collection
////
-Removes an Analytics Collection and its associated data stream.
+Removes a <> Collection and its associated data stream.
[[delete-analytics-collection-request]]
==== {api-request-title}
diff --git a/docs/reference/behavioral-analytics/apis/index.asciidoc b/docs/reference/behavioral-analytics/apis/index.asciidoc
index 042b50259b1bb..692d3374f89f5 100644
--- a/docs/reference/behavioral-analytics/apis/index.asciidoc
+++ b/docs/reference/behavioral-analytics/apis/index.asciidoc
@@ -9,7 +9,7 @@ beta::[]
---
-Use the following APIs to manage tasks and resources related to Behavioral Analytics:
+Use the following APIs to manage tasks and resources related to <>:
* <>
* <>
diff --git a/docs/reference/behavioral-analytics/apis/list-analytics-collection.asciidoc b/docs/reference/behavioral-analytics/apis/list-analytics-collection.asciidoc
index 8d2491ff8a6ee..14511a1258278 100644
--- a/docs/reference/behavioral-analytics/apis/list-analytics-collection.asciidoc
+++ b/docs/reference/behavioral-analytics/apis/list-analytics-collection.asciidoc
@@ -24,7 +24,7 @@ DELETE _application/analytics/my_analytics_collection2
// TEARDOWN
////
-Returns information about Analytics Collections.
+Returns information about <> Collections.
[[list-analytics-collection-request]]
==== {api-request-title}
diff --git a/docs/reference/behavioral-analytics/apis/post-analytics-collection-event.asciidoc b/docs/reference/behavioral-analytics/apis/post-analytics-collection-event.asciidoc
index 84d9cb5351799..f82717e22ed34 100644
--- a/docs/reference/behavioral-analytics/apis/post-analytics-collection-event.asciidoc
+++ b/docs/reference/behavioral-analytics/apis/post-analytics-collection-event.asciidoc
@@ -22,7 +22,7 @@ DELETE _application/analytics/my_analytics_collection
// TEARDOWN
////
-Post an event to an Analytics Collection.
+Post an event to a <> Collection.
[[post-analytics-collection-event-request]]
==== {api-request-title}
diff --git a/docs/reference/behavioral-analytics/apis/put-analytics-collection.asciidoc b/docs/reference/behavioral-analytics/apis/put-analytics-collection.asciidoc
index 48273fb3906c4..cbbab2ae3e26c 100644
--- a/docs/reference/behavioral-analytics/apis/put-analytics-collection.asciidoc
+++ b/docs/reference/behavioral-analytics/apis/put-analytics-collection.asciidoc
@@ -16,7 +16,7 @@ DELETE _application/analytics/my_analytics_collection
// TEARDOWN
////
-Creates an Analytics Collection.
+Creates a <> Collection.
[[put-analytics-collection-request]]
==== {api-request-title}
diff --git a/docs/reference/cat/recovery.asciidoc b/docs/reference/cat/recovery.asciidoc
index 058f4e69ae8e3..c3292fc9971ee 100644
--- a/docs/reference/cat/recovery.asciidoc
+++ b/docs/reference/cat/recovery.asciidoc
@@ -39,7 +39,7 @@ The cat recovery API returns information about shard recoveries, both
ongoing and completed. It is a more compact view of the JSON
<> API.
-include::{es-ref-dir}/indices/recovery.asciidoc[tag=shard-recovery-desc]
+include::{es-ref-dir}/modules/shard-recovery-desc.asciidoc[]
[[cat-recovery-path-params]]
diff --git a/docs/reference/ccr/apis/auto-follow/delete-auto-follow-pattern.asciidoc b/docs/reference/ccr/apis/auto-follow/delete-auto-follow-pattern.asciidoc
index 1c72fb8742b93..b510163bab50b 100644
--- a/docs/reference/ccr/apis/auto-follow/delete-auto-follow-pattern.asciidoc
+++ b/docs/reference/ccr/apis/auto-follow/delete-auto-follow-pattern.asciidoc
@@ -5,7 +5,7 @@
Delete auto-follow pattern
++++
-Delete auto-follow patterns.
+Delete {ccr} <>.
[[ccr-delete-auto-follow-pattern-request]]
==== {api-request-title}
diff --git a/docs/reference/ccr/apis/auto-follow/get-auto-follow-pattern.asciidoc b/docs/reference/ccr/apis/auto-follow/get-auto-follow-pattern.asciidoc
index 46ef288b05088..a2969e993ddfb 100644
--- a/docs/reference/ccr/apis/auto-follow/get-auto-follow-pattern.asciidoc
+++ b/docs/reference/ccr/apis/auto-follow/get-auto-follow-pattern.asciidoc
@@ -5,7 +5,7 @@
Get auto-follow pattern
++++
-Get auto-follow patterns.
+Get {ccr} <>.
[[ccr-get-auto-follow-pattern-request]]
==== {api-request-title}
diff --git a/docs/reference/ccr/apis/auto-follow/pause-auto-follow-pattern.asciidoc b/docs/reference/ccr/apis/auto-follow/pause-auto-follow-pattern.asciidoc
index 1e64ab813e2ad..c5ae5a7b4af9d 100644
--- a/docs/reference/ccr/apis/auto-follow/pause-auto-follow-pattern.asciidoc
+++ b/docs/reference/ccr/apis/auto-follow/pause-auto-follow-pattern.asciidoc
@@ -5,7 +5,7 @@
Pause auto-follow pattern
++++
-Pauses an auto-follow pattern.
+Pauses a {ccr} <>.
[[ccr-pause-auto-follow-pattern-request]]
==== {api-request-title}
diff --git a/docs/reference/ccr/apis/auto-follow/put-auto-follow-pattern.asciidoc b/docs/reference/ccr/apis/auto-follow/put-auto-follow-pattern.asciidoc
index d08997068f705..6769f21ca5cef 100644
--- a/docs/reference/ccr/apis/auto-follow/put-auto-follow-pattern.asciidoc
+++ b/docs/reference/ccr/apis/auto-follow/put-auto-follow-pattern.asciidoc
@@ -5,7 +5,7 @@
Create auto-follow pattern
++++
-Creates an auto-follow pattern.
+Creates a {ccr} <>.
[[ccr-put-auto-follow-pattern-request]]
==== {api-request-title}
diff --git a/docs/reference/ccr/apis/auto-follow/resume-auto-follow-pattern.asciidoc b/docs/reference/ccr/apis/auto-follow/resume-auto-follow-pattern.asciidoc
index 04da9b4a35ba0..a580bb3838f9b 100644
--- a/docs/reference/ccr/apis/auto-follow/resume-auto-follow-pattern.asciidoc
+++ b/docs/reference/ccr/apis/auto-follow/resume-auto-follow-pattern.asciidoc
@@ -5,7 +5,7 @@
Resume auto-follow pattern
++++
-Resumes an auto-follow pattern.
+Resumes a {ccr} <>.
[[ccr-resume-auto-follow-pattern-request]]
==== {api-request-title}
diff --git a/docs/reference/ccr/apis/ccr-apis.asciidoc b/docs/reference/ccr/apis/ccr-apis.asciidoc
index 0c9f033639eda..ae94e1931af85 100644
--- a/docs/reference/ccr/apis/ccr-apis.asciidoc
+++ b/docs/reference/ccr/apis/ccr-apis.asciidoc
@@ -2,7 +2,7 @@
[[ccr-apis]]
== {ccr-cap} APIs
-You can use the following APIs to perform {ccr} operations.
+You can use the following APIs to perform <> operations.
[discrete]
[[ccr-api-top-level]]
diff --git a/docs/reference/ccr/apis/follow/get-follow-info.asciidoc b/docs/reference/ccr/apis/follow/get-follow-info.asciidoc
index 68fd6e210f884..6c049d9c92b59 100644
--- a/docs/reference/ccr/apis/follow/get-follow-info.asciidoc
+++ b/docs/reference/ccr/apis/follow/get-follow-info.asciidoc
@@ -5,7 +5,7 @@
Get follower info
++++
-Retrieves information about all follower indices.
+Retrieves information about all <> follower indices.
[[ccr-get-follow-info-request]]
==== {api-request-title}
diff --git a/docs/reference/ccr/apis/follow/get-follow-stats.asciidoc b/docs/reference/ccr/apis/follow/get-follow-stats.asciidoc
index 72224cc7f51f4..4892f86b3523d 100644
--- a/docs/reference/ccr/apis/follow/get-follow-stats.asciidoc
+++ b/docs/reference/ccr/apis/follow/get-follow-stats.asciidoc
@@ -5,7 +5,7 @@
Get follower stats
++++
-Get follower stats.
+Get <> follower stats.
[[ccr-get-follow-stats-request]]
==== {api-request-title}
diff --git a/docs/reference/ccr/apis/follow/post-forget-follower.asciidoc b/docs/reference/ccr/apis/follow/post-forget-follower.asciidoc
index ea7e8640056bf..1917c08d6640d 100644
--- a/docs/reference/ccr/apis/follow/post-forget-follower.asciidoc
+++ b/docs/reference/ccr/apis/follow/post-forget-follower.asciidoc
@@ -5,7 +5,7 @@
Forget follower
++++
-Removes the follower retention leases from the leader.
+Removes the <> follower retention leases from the leader.
[[ccr-post-forget-follower-request]]
==== {api-request-title}
diff --git a/docs/reference/ccr/apis/follow/post-pause-follow.asciidoc b/docs/reference/ccr/apis/follow/post-pause-follow.asciidoc
index a4ab69aba8d84..6d4730d10efe6 100644
--- a/docs/reference/ccr/apis/follow/post-pause-follow.asciidoc
+++ b/docs/reference/ccr/apis/follow/post-pause-follow.asciidoc
@@ -5,7 +5,7 @@
Pause follower
++++
-Pauses a follower index.
+Pauses a <> follower index.
[[ccr-post-pause-follow-request]]
==== {api-request-title}
diff --git a/docs/reference/ccr/apis/follow/post-resume-follow.asciidoc b/docs/reference/ccr/apis/follow/post-resume-follow.asciidoc
index 47ba51a3fb8a0..b023a8cb5cb70 100644
--- a/docs/reference/ccr/apis/follow/post-resume-follow.asciidoc
+++ b/docs/reference/ccr/apis/follow/post-resume-follow.asciidoc
@@ -5,7 +5,7 @@
Resume follower
++++
-Resumes a follower index.
+Resumes a <> follower index.
[[ccr-post-resume-follow-request]]
==== {api-request-title}
diff --git a/docs/reference/ccr/apis/follow/post-unfollow.asciidoc b/docs/reference/ccr/apis/follow/post-unfollow.asciidoc
index b96777b455d3b..dab11ef9e7a54 100644
--- a/docs/reference/ccr/apis/follow/post-unfollow.asciidoc
+++ b/docs/reference/ccr/apis/follow/post-unfollow.asciidoc
@@ -5,7 +5,7 @@
Unfollow
++++
-Converts a follower index to a regular index.
+Converts a <> follower index to a regular index.
[[ccr-post-unfollow-request]]
==== {api-request-title}
diff --git a/docs/reference/ccr/apis/follow/put-follow.asciidoc b/docs/reference/ccr/apis/follow/put-follow.asciidoc
index eb83e2a13dcf1..b7ae9ac987474 100644
--- a/docs/reference/ccr/apis/follow/put-follow.asciidoc
+++ b/docs/reference/ccr/apis/follow/put-follow.asciidoc
@@ -5,7 +5,7 @@
Create follower
++++
-Creates a follower index.
+Creates a <> follower index.
[[ccr-put-follow-request]]
==== {api-request-title}
diff --git a/docs/reference/ccr/apis/get-ccr-stats.asciidoc b/docs/reference/ccr/apis/get-ccr-stats.asciidoc
index 128df5e47c777..92e6bae0bdce8 100644
--- a/docs/reference/ccr/apis/get-ccr-stats.asciidoc
+++ b/docs/reference/ccr/apis/get-ccr-stats.asciidoc
@@ -6,7 +6,7 @@
Get {ccr-init} stats
++++
-Get {ccr} stats.
+Get <> stats.
[[ccr-get-stats-request]]
==== {api-request-title}
diff --git a/docs/reference/cluster/allocation-explain.asciidoc b/docs/reference/cluster/allocation-explain.asciidoc
index 0b0fde6546c29..7547dd74c5ecd 100644
--- a/docs/reference/cluster/allocation-explain.asciidoc
+++ b/docs/reference/cluster/allocation-explain.asciidoc
@@ -4,7 +4,7 @@
Cluster allocation explain
++++
-Provides an explanation for a shard's current allocation.
+Provides an explanation for a shard's current <>.
[source,console]
----
@@ -81,6 +81,7 @@ you might expect otherwise.
===== Unassigned primary shard
+====== Conflicting settings
The following request gets an allocation explanation for an unassigned primary
shard.
@@ -158,6 +159,56 @@ node.
<5> The decider which led to the `no` decision for the node.
<6> An explanation as to why the decider returned a `no` decision, with a helpful hint pointing to the setting that led to the decision. In this example, a newly created index has <> that requires that it only be allocated to a node named `nonexistent_node`, which does not exist, so the index is unable to allocate.
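+
+One way to resolve this particular conflict is to clear the offending allocation filter. The following is a sketch only; it assumes the filter was set via the `index.routing.allocation.include._name` index setting, which may differ in your case:
+
+[source,console]
+----
+PUT my-index-000001/_settings
+{
+  "index.routing.allocation.include._name": null <1>
+}
+----
+<1> Assumed setting name; setting it to `null` removes the filter so the shard can allocate to any eligible node.
+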
+====== Maximum number of retries exceeded
+
+The following response contains an allocation explanation for an unassigned
+primary shard that has reached the maximum number of allocation retry attempts.
+
+[source,js]
+----
+{
+ "index" : "my-index-000001",
+ "shard" : 0,
+ "primary" : true,
+ "current_state" : "unassigned",
+ "unassigned_info" : {
+ "at" : "2017-01-04T18:03:28.464Z",
+ "failed shard on node [mEKjwwzLT1yJVb8UxT6anw]: failed recovery, failure RecoveryFailedException",
+ "reason": "ALLOCATION_FAILED",
+ "failed_allocation_attempts": 5,
+ "last_allocation_status": "no",
+ },
+ "can_allocate": "no",
+ "allocate_explanation": "cannot allocate because allocation is not permitted to any of the nodes",
+ "node_allocation_decisions" : [
+ {
+ "node_id" : "3sULLVJrRneSg0EfBB-2Ew",
+ "node_name" : "node_t0",
+ "transport_address" : "127.0.0.1:9400",
+ "roles" : ["data_content", "data_hot"],
+ "node_decision" : "no",
+ "store" : {
+ "matching_size" : "4.2kb",
+ "matching_size_in_bytes" : 4325
+ },
+ "deciders" : [
+ {
+ "decider": "max_retry",
+ "decision" : "NO",
+ "explanation": "shard has exceeded the maximum number of retries [5] on failed allocation attempts - manually call [/_cluster/reroute?retry_failed=true] to retry, [unassigned_info[[reason=ALLOCATION_FAILED], at[2024-07-30T21:04:12.166Z], failed_attempts[5], failed_nodes[[mEKjwwzLT1yJVb8UxT6anw]], delayed=false, details[failed shard on node [mEKjwwzLT1yJVb8UxT6anw]: failed recovery, failure RecoveryFailedException], allocation_status[deciders_no]]]"
+ }
+ ]
+ }
+ ]
+}
+----
+// NOTCONSOLE
+
+If the decider message indicates a transient allocation issue, use
+<> to retry the allocation.
+
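+For example, a minimal retry request looks like this (the endpoint comes from the decider message above; run it only once the underlying cause of the repeated failures has been addressed):
+
+[source,console]
+----
+POST /_cluster/reroute?retry_failed=true
+----
+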
+====== No valid shard copy
+
The following response contains an allocation explanation for an unassigned
primary shard that was previously allocated.
@@ -184,6 +235,8 @@ TIP: If a shard is unassigned with an allocation status of `no_valid_shard_copy`
===== Unassigned replica shard
+====== Allocation delayed
+
The following response contains an allocation explanation for a replica that's
unassigned due to <>.
@@ -241,8 +294,52 @@ unassigned due to <>.
<2> The remaining delay before allocating the replica shard.
<3> Information about the shard data found on a node.
+====== Allocation throttled
+
+The following response contains an allocation explanation for a replica that's
+queued to allocate but currently waiting on other queued shards.
+
+[source,js]
+----
+{
+ "index" : "my-index-000001",
+ "shard" : 0,
+ "primary" : false,
+ "current_state" : "unassigned",
+ "unassigned_info" : {
+ "reason" : "NODE_LEFT",
+ "at" : "2017-01-04T18:53:59.498Z",
+ "details" : "node_left[G92ZwuuaRY-9n8_tc-IzEg]",
+ "last_allocation_status" : "no_attempt"
+ },
+ "can_allocate": "throttled",
+ "allocate_explanation": "Elasticsearch is currently busy with other activities. It expects to be able to allocate this shard when those activities finish. Please wait.",
+ "node_allocation_decisions" : [
+ {
+ "node_id" : "3sULLVJrRneSg0EfBB-2Ew",
+ "node_name" : "node_t0",
+ "transport_address" : "127.0.0.1:9400",
+ "roles" : ["data_content", "data_hot"],
+ "node_decision" : "no",
+ "deciders" : [
+ {
+ "decider": "throttling",
+ "decision": "THROTTLE",
+ "explanation": "reached the limit of incoming shard recoveries [2], cluster setting [cluster.routing.allocation.node_concurrent_incoming_recoveries=2] (can also be set via [cluster.routing.allocation.node_concurrent_recoveries])"
+ }
+ ]
+ }
+ ]
+}
+----
+// NOTCONSOLE
+
+This is a transient message that might appear when a large number of shards are allocating.
+
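+If shards stay throttled for an extended period, the decider explanation names the relevant limits. As a sketch, they can be raised like this (do so cautiously, since higher limits increase recovery load on the nodes):
+
+[source,console]
+----
+PUT /_cluster/settings
+{
+  "persistent": {
+    "cluster.routing.allocation.node_concurrent_recoveries": 4 <1>
+  }
+}
+----
+<1> The default for this setting is 2.
+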
===== Assigned shard
+====== Cannot remain on current node
+
The following response contains an allocation explanation for an assigned shard.
The response indicates the shard is not allowed to remain on its current node
and must be reallocated.
@@ -295,6 +392,8 @@ and must be reallocated.
<2> The deciders that factored into the decision of why the shard is not allowed to remain on its current node.
<3> Whether the shard is allowed to be allocated to another node.
+====== Must remain on current node
+
The following response contains an allocation explanation for a shard that must
remain on its current node. Moving the shard to another node would not improve
cluster balance.
@@ -338,7 +437,7 @@ cluster balance.
===== No arguments
If you call the API with no arguments, {es} retrieves an allocation explanation
-for an arbitrary unassigned primary or replica shard.
+for an arbitrary unassigned primary or replica shard, returning any unassigned primary shards first.
[source,console]
----
diff --git a/docs/reference/cluster/delete-desired-balance.asciidoc b/docs/reference/cluster/delete-desired-balance.asciidoc
index f81dcab011da4..c67834269e505 100644
--- a/docs/reference/cluster/delete-desired-balance.asciidoc
+++ b/docs/reference/cluster/delete-desired-balance.asciidoc
@@ -6,7 +6,7 @@
NOTE: {cloud-only}
-Discards the current desired balance and computes a new desired balance starting from the current allocation of shards.
+Discards the current <> and computes a new desired balance starting from the current allocation of shards.
This can sometimes help {es} find a desired balance which needs fewer shard movements to achieve, especially if the
cluster has experienced changes so substantial that the current desired balance is no longer optimal without {es} having
detected that the current desired balance will take more shard movements to achieve than needed. However, this API
diff --git a/docs/reference/cluster/get-desired-balance.asciidoc b/docs/reference/cluster/get-desired-balance.asciidoc
index 3fd87dcfedc4f..74afdaa52daf1 100644
--- a/docs/reference/cluster/get-desired-balance.asciidoc
+++ b/docs/reference/cluster/get-desired-balance.asciidoc
@@ -8,7 +8,7 @@ NOTE: {cloud-only}
Exposes:
-* the desired balance computation and reconciliation stats
+* the <> computation and reconciliation stats
* balancing stats such as distribution of shards, disk and ingest forecasts
across nodes and data tiers (based on the current cluster state)
* routing table with each shard current and desired location
diff --git a/docs/reference/cluster/nodes-stats.asciidoc b/docs/reference/cluster/nodes-stats.asciidoc
index 084ff471367ce..61c58cea95b83 100644
--- a/docs/reference/cluster/nodes-stats.asciidoc
+++ b/docs/reference/cluster/nodes-stats.asciidoc
@@ -808,6 +808,14 @@ This is not shown for the `shards` level, since mappings may be shared across th
`total_estimated_overhead_in_bytes`::
(integer) Estimated heap overhead, in bytes, of mappings on this node, which allows for 1kiB of heap for every mapped field.
+`total_segments`::
+(integer) Estimated number of Lucene segments on this node.
+
+`total_segment_fields`::
+(integer) Estimated number of fields at the segment level on this node.
+
+`average_fields_per_segment`::
+(integer) Estimated average number of fields per segment on this node.
=======
`dense_vector`::
@@ -834,6 +842,142 @@ This is not shown for the `shards` level, since mappings may be shared across th
=======
+`shards`::
+(object) When the `shards` level is requested, contains the aforementioned `indices` statistics for every shard (per
+index, and then per shard ID), as well as the following shard-specific statistics (which are not shown when the
+requested level is higher than `shards`):
++
+.Additional shard-specific statistics for the `shards` level
+[%collapsible%open]
+=======
+
+`routing`::
+(object) Contains routing information about the shard.
++
+.Properties of `routing`
+[%collapsible%open]
+========
+
+`state`::
+(string) State of the shard. Returned values are:
++
+* `INITIALIZING`: The shard is initializing/recovering.
+* `RELOCATING`: The shard is relocating.
+* `STARTED`: The shard has started.
+* `UNASSIGNED`: The shard is not assigned to any node.
+
+`primary`::
+(Boolean) Whether the shard is a primary shard or not.
+
+`node`::
+(string) ID of the node the shard is allocated to.
+
+`relocating_node`::
+(string) ID of the node the shard is either relocating to or relocating from, or `null` if the shard is not relocating.
+
+========
+
+`commit`::
+(object) Contains information regarding the last commit point of the shard.
++
+.Properties of `commit`
+[%collapsible%open]
+========
+
+`id`::
+(string) Base64 version of the commit ID.
+
+`generation`::
+(integer) Lucene generation of the commit.
+
+`user_data`::
+(object) Contains additional technical information about the commit.
+
+`num_docs`::
+(integer) The number of docs in the commit.
+
+========
+
+`seq_no`::
+(object) Contains information about <> and checkpoints for the shard.
++
+.Properties of `seq_no`
+[%collapsible%open]
+========
+
+`max_seq_no`::
+(integer) The maximum sequence number issued so far.
+
+`local_checkpoint`::
+(integer) The current local checkpoint of the shard.
+
+`global_checkpoint`::
+(integer) The current global checkpoint of the shard.
+
+========
+
+`retention_leases`::
+(object) Contains information about <>.
++
+.Properties of `retention_leases`
+[%collapsible%open]
+========
+
+`primary_term`::
+(integer) The primary term of this retention lease collection.
+
+`version`::
+(integer) The current version of the retention lease collection.
+
+`leases`::
+(array of objects) List of current leases for this shard.
++
+.Properties of `leases`
+[%collapsible%open]
+=========
+
+`id`::
+(string) The ID of the lease.
+
+`retaining_seq_no`::
+(integer) The minimum sequence number to be retained by the lease.
+
+`timestamp`::
+(integer) The timestamp of when the lease was created or renewed.
+Recorded in milliseconds since the {wikipedia}/Unix_time[Unix Epoch].
+
+`source`::
+(string) The source of the lease.
+
+=========
+========
+
+`shard_path`::
+(object) Contains information about the paths where the shard's state and data are stored.
++
+.Properties of `shard_path`
+[%collapsible%open]
+========
+
+`state_path`::
+(string) The state-path root, without the index name and the shard ID.
+
+`data_path`::
+(string) The data-path root, without the index name and the shard ID.
+
+`is_custom_data_path`::
+(Boolean) Whether the data path is a custom data location, and therefore outside of the node's configured data paths.
+
+========
+
+`search_idle`::
+(Boolean) Whether the shard is <> or not.
+
+`search_idle_time`::
+(integer) Time since previous searcher access.
+Recorded in milliseconds.
+
+=======
======
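
For example, the shard-level statistics described above are returned when the nodes stats request sets the `level` query parameter to `shards`:

[source,console]
----
GET /_nodes/stats/indices?level=shards
----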
[[cluster-nodes-stats-api-response-body-os]]
diff --git a/docs/reference/cluster/stats.asciidoc b/docs/reference/cluster/stats.asciidoc
index 3b429ef427071..c39bc0dcd2878 100644
--- a/docs/reference/cluster/stats.asciidoc
+++ b/docs/reference/cluster/stats.asciidoc
@@ -1282,6 +1282,31 @@ They are included here for expert users, but should otherwise be ignored.
=====
+====
+
+`repositories`::
+(object) Contains statistics about the <> repositories defined in the cluster, broken down
+by repository type.
++
+.Properties of `repositories`
+[%collapsible%open]
+=====
+
+`count`:::
+(integer) The number of repositories of this type in the cluster.
+
+`read_only`:::
+(integer) The number of repositories of this type in the cluster which are registered read-only.
+
+`read_write`:::
+(integer) The number of repositories of this type in the cluster which are not registered as read-only.
+
+Each repository type may also include other statistics about the repositories of that type here.
+
+=====
+
+====
+
[[cluster-stats-api-example]]
==== {api-examples-title}
@@ -1579,6 +1604,9 @@ The API returns the following response:
},
"snapshots": {
...
+ },
+ "repositories": {
+ ...
}
}
--------------------------------------------------
@@ -1589,6 +1617,7 @@ The API returns the following response:
// TESTRESPONSE[s/"count": \{[^\}]*\}/"count": $body.$_path/]
// TESTRESPONSE[s/"packaging_types": \[[^\]]*\]/"packaging_types": $body.$_path/]
// TESTRESPONSE[s/"snapshots": \{[^\}]*\}/"snapshots": $body.$_path/]
+// TESTRESPONSE[s/"repositories": \{[^\}]*\}/"repositories": $body.$_path/]
// TESTRESPONSE[s/"field_types": \[[^\]]*\]/"field_types": $body.$_path/]
// TESTRESPONSE[s/"runtime_field_types": \[[^\]]*\]/"runtime_field_types": $body.$_path/]
// TESTRESPONSE[s/"search": \{[^\}]*\}/"search": $body.$_path/]
@@ -1600,7 +1629,7 @@ The API returns the following response:
// the plugins that will be in it. And because we figure folks don't need to
// see an exhaustive list anyway.
// 2. Similarly, ignore the contents of `network_types`, `discovery_types`,
-// `packaging_types` and `snapshots`.
+// `packaging_types`, `snapshots` and `repositories`.
// 3. Ignore the contents of the (nodes) count object, as what's shown here
// depends on the license. Voting-only nodes are e.g. only shown when this
// test runs with a basic license.
diff --git a/docs/reference/data-streams/change-mappings-and-settings.asciidoc b/docs/reference/data-streams/change-mappings-and-settings.asciidoc
index 076b315558b60..1290f289e5bbd 100644
--- a/docs/reference/data-streams/change-mappings-and-settings.asciidoc
+++ b/docs/reference/data-streams/change-mappings-and-settings.asciidoc
@@ -5,7 +5,7 @@
[[data-streams-change-mappings-and-settings]]
=== Change mappings and settings for a data stream
-Each data stream has a <> has a <>. Mappings and index settings from this template are applied to new
backing indices created for the stream. This includes the stream's first
backing index, which is auto-generated when the stream is created.
diff --git a/docs/reference/data-streams/downsampling-manual.asciidoc b/docs/reference/data-streams/downsampling-manual.asciidoc
index 771a08d97d949..44ae77d072034 100644
--- a/docs/reference/data-streams/downsampling-manual.asciidoc
+++ b/docs/reference/data-streams/downsampling-manual.asciidoc
@@ -14,7 +14,7 @@ DELETE _ingest/pipeline/my-timestamp-pipeline
// TEARDOWN
////
-The recommended way to downsample a time series data stream (TSDS) is
+The recommended way to <> a <> is
<>. However, if
you're not using ILM, you can downsample a TSDS manually. This guide shows you
how, using typical Kubernetes cluster monitoring data.
@@ -32,7 +32,7 @@ To test out manual downsampling, follow these steps:
==== Prerequisites
* Refer to the <>.
-* It is not possible to downsample a data stream directly, nor
+* It is not possible to downsample a <> directly, nor
multiple indices at once. It's only possible to downsample one time series index
(TSDS backing index).
* In order to downsample an index, it needs to be read-only. For a TSDS write
diff --git a/docs/reference/data-streams/lifecycle/apis/delete-lifecycle.asciidoc b/docs/reference/data-streams/lifecycle/apis/delete-lifecycle.asciidoc
index f20c949c2fbc8..315f7fa85e45f 100644
--- a/docs/reference/data-streams/lifecycle/apis/delete-lifecycle.asciidoc
+++ b/docs/reference/data-streams/lifecycle/apis/delete-lifecycle.asciidoc
@@ -4,7 +4,7 @@
Delete Data Stream Lifecycle
++++
-Deletes the lifecycle from a set of data streams.
+Deletes the <> from a set of data streams.
[[delete-lifecycle-api-prereqs]]
==== {api-prereq-title}
diff --git a/docs/reference/data-streams/lifecycle/apis/explain-lifecycle.asciidoc b/docs/reference/data-streams/lifecycle/apis/explain-lifecycle.asciidoc
index 7968bb78939e8..2b15886ebe192 100644
--- a/docs/reference/data-streams/lifecycle/apis/explain-lifecycle.asciidoc
+++ b/docs/reference/data-streams/lifecycle/apis/explain-lifecycle.asciidoc
@@ -4,7 +4,7 @@
Explain Data Stream Lifecycle
++++
-Retrieves the current data stream lifecycle status for one or more data stream backing indices.
+Retrieves the current <> status for one or more data stream backing indices.
[[explain-lifecycle-api-prereqs]]
==== {api-prereq-title}
diff --git a/docs/reference/data-streams/lifecycle/apis/get-lifecycle-stats.asciidoc b/docs/reference/data-streams/lifecycle/apis/get-lifecycle-stats.asciidoc
index a99fa19d9db8d..f48fa1eb52daa 100644
--- a/docs/reference/data-streams/lifecycle/apis/get-lifecycle-stats.asciidoc
+++ b/docs/reference/data-streams/lifecycle/apis/get-lifecycle-stats.asciidoc
@@ -4,7 +4,7 @@
Get Data Stream Lifecycle
++++
-Gets stats about the execution of data stream lifecycle.
+Gets stats about the execution of <>.
[[get-lifecycle-stats-api-prereqs]]
==== {api-prereq-title}
diff --git a/docs/reference/data-streams/lifecycle/apis/get-lifecycle.asciidoc b/docs/reference/data-streams/lifecycle/apis/get-lifecycle.asciidoc
index 331285af395b6..6bac1c7f7cc75 100644
--- a/docs/reference/data-streams/lifecycle/apis/get-lifecycle.asciidoc
+++ b/docs/reference/data-streams/lifecycle/apis/get-lifecycle.asciidoc
@@ -4,7 +4,7 @@
Get Data Stream Lifecycle
++++
-Gets the lifecycle of a set of data streams.
+Gets the <> of a set of <>.
[[get-lifecycle-api-prereqs]]
==== {api-prereq-title}
@@ -128,14 +128,18 @@ The response will look like the following:
"name": "my-data-stream-1",
"lifecycle": {
"enabled": true,
- "data_retention": "7d"
+ "data_retention": "7d",
+ "effective_retention": "7d",
+ "retention_determined_by": "data_stream_configuration"
}
},
{
"name": "my-data-stream-2",
"lifecycle": {
"enabled": true,
- "data_retention": "7d"
+ "data_retention": "7d",
+ "effective_retention": "7d",
+ "retention_determined_by": "data_stream_configuration"
}
}
]
diff --git a/docs/reference/data-streams/lifecycle/apis/put-lifecycle.asciidoc b/docs/reference/data-streams/lifecycle/apis/put-lifecycle.asciidoc
index 7d33a5b5f880c..c60c105e818ab 100644
--- a/docs/reference/data-streams/lifecycle/apis/put-lifecycle.asciidoc
+++ b/docs/reference/data-streams/lifecycle/apis/put-lifecycle.asciidoc
@@ -4,7 +4,7 @@
Put Data Stream Lifecycle
++++
-Configures the data stream lifecycle for the targeted data streams.
+Configures the data stream <> for the targeted <>.
[[put-lifecycle-api-prereqs]]
==== {api-prereq-title}
diff --git a/docs/reference/data-streams/lifecycle/index.asciidoc b/docs/reference/data-streams/lifecycle/index.asciidoc
index 16ccf2ef82391..e4d5acfb704d3 100644
--- a/docs/reference/data-streams/lifecycle/index.asciidoc
+++ b/docs/reference/data-streams/lifecycle/index.asciidoc
@@ -14,10 +14,11 @@ To achieve that, it supports:
* Automatic <>, which chunks your incoming data in smaller pieces to facilitate better performance
and backwards incompatible mapping changes.
* Configurable retention, which allows you to configure the time period for which your data is guaranteed to be stored.
-{es} is allowed at a later time to delete data older than this time period.
+{es} is allowed at a later time to delete data older than this time period. Retention can be configured on the data stream level
+or on a global level. Read more about the different options in this <>.
A data stream lifecycle also supports downsampling the data stream backing indices.
-See <> for
+See <> for
more details.
[discrete]
@@ -33,16 +34,17 @@ each data stream and performs the following steps:
3. After an index is not the write index anymore (i.e. the data stream has been rolled over),
automatically tail merges the index. Data stream lifecycle executes a merge operation that only targets
the long tail of small segments instead of the whole shard. As the segments are organised
-into tiers of exponential sizes, merging the long tail of small segments is only a
+into tiers of exponential sizes, merging the long tail of small segments is only a
fraction of the cost of force merging to a single segment. The small segments would usually
hold the most recent data so tail merging will focus the merging resources on the higher-value
data that is most likely to keep being queried.
-4. If <> is configured it will execute
+4. If <> is configured it will execute
all the configured downsampling rounds.
5. Applies retention to the remaining backing indices. This means deleting the backing indices whose
-`generation_time` is longer than the configured retention period. The `generation_time` is only applicable to rolled over backing
-indices and it is either the time since the backing index got rolled over, or the time optionally configured in the
-<> setting.
+`generation_time` is longer than the effective retention period (read more about the
+<>). The `generation_time` is only applicable to rolled
+over backing indices and it is either the time since the backing index got rolled over, or the time optionally configured
+in the <> setting.
IMPORTANT: We use the `generation_time` instead of the creation time because this ensures that all data in the backing
index have passed the retention period. As a result, the retention period is not the exact time data gets deleted, but
@@ -75,4 +77,6 @@ include::tutorial-manage-new-data-stream.asciidoc[]
include::tutorial-manage-existing-data-stream.asciidoc[]
+include::tutorial-manage-data-stream-retention.asciidoc[]
+
include::tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc[]
diff --git a/docs/reference/data-streams/lifecycle/tutorial-manage-data-stream-retention.asciidoc b/docs/reference/data-streams/lifecycle/tutorial-manage-data-stream-retention.asciidoc
new file mode 100644
index 0000000000000..1b2996c62e2df
--- /dev/null
+++ b/docs/reference/data-streams/lifecycle/tutorial-manage-data-stream-retention.asciidoc
@@ -0,0 +1,218 @@
+[role="xpack"]
+[[tutorial-manage-data-stream-retention]]
+=== Tutorial: Data stream retention
+
+In this tutorial, we are going to go over data stream lifecycle retention; we will define it, go over how it can be configured,
+and how it gets applied. Keep in mind that the following options apply only to data streams that are managed by the data stream lifecycle.
+
+. <>
+. <>
+. <>
+. <>
+
+You can verify if a data stream is managed by the data stream lifecycle via the <>:
+
+////
+[source,console]
+----
+PUT /_index_template/template
+{
+ "index_patterns": ["my-data-stream*"],
+ "template": {
+ "lifecycle": {}
+ },
+ "data_stream": { }
+}
+
+PUT /_data_stream/my-data-stream
+----
+// TESTSETUP
+////
+
+////
+[source,console]
+----
+DELETE /_data_stream/my-data-stream*
+DELETE /_index_template/template
+PUT /_cluster/settings
+{
+ "persistent" : {
+ "data_streams.lifecycle.retention.*" : null
+ }
+}
+----
+// TEARDOWN
+////
+
+[source,console]
+--------------------------------------------------
+GET _data_stream/my-data-stream/_lifecycle
+--------------------------------------------------
+
+The result should look like this:
+
+[source,console-result]
+--------------------------------------------------
+{
+ "data_streams": [
+ {
+ "name": "my-data-stream", <1>
+ "lifecycle": {
+ "enabled": true <2>
+ }
+ }
+ ]
+}
+--------------------------------------------------
+// TESTRESPONSE[skip:the result is for illustrating purposes only]
+<1> The name of your data stream.
+<2> Ensure that the lifecycle is enabled, meaning this should be `true`.
+
+[discrete]
+[[what-is-retention]]
+==== What is data stream retention?
+
+We define retention as the minimum amount of time the data of a data stream will be kept in {es}. After this time period
+has passed, {es} is allowed to remove this data to free up space and/or manage costs.
+
+NOTE: Retention does not define when the data will be removed, but the minimum time period it will be kept.
+
+We define 4 different types of retention:
+
+* The data stream retention, or `data_retention`, which is the retention configured on the data stream level. It can be
+set via an <> for future data streams or via the <> for an existing data stream. When the data stream retention is not set, it implies that the data
+need to be kept forever.
+* The global default retention, let's call it `default_retention`, which is a retention configured via the cluster setting
+<> and will be
+applied to all data streams managed by data stream lifecycle that do not have `data_retention` configured. Effectively,
+it ensures that there will be no data streams keeping their data forever. This can be set via the
+<>.
+* The global max retention, let's call it `max_retention`, which is a retention configured via the cluster setting
+<> and will be applied to
+all data streams managed by data stream lifecycle. Effectively, it ensures that there will be no data streams whose retention
+will exceed this time period. This can be set via the <>.
+* The effective retention, or `effective_retention`, which is the retention applied to a data stream at a given moment.
+Effective retention cannot be set directly; it is derived by taking into account all the configured retention values listed
+above, and is calculated as described <>.
+
+NOTE: Global default and max retention do not apply to data streams internal to Elastic. Internal data streams are recognized
+ either by having the `system` flag set to `true` or by having a name prefixed with a dot (`.`).
+
+[discrete]
+[[retention-configuration]]
+==== How to configure retention?
+
+- By setting the `data_retention` on the data stream level. This retention can be configured in two ways:
++
+-- For new data streams, it can be defined in the index template that would be applied during the data stream's creation.
+You can use the <>, for example:
++
+[source,console]
+--------------------------------------------------
+PUT _index_template/template
+{
+ "index_patterns": ["my-data-stream*"],
+ "data_stream": { },
+ "priority": 500,
+ "template": {
+ "lifecycle": {
+ "data_retention": "7d"
+ }
+ },
+ "_meta": {
+ "description": "Template with data stream lifecycle"
+ }
+}
+--------------------------------------------------
+-- For an existing data stream, it can be set via the <>.
++
+[source,console]
+----
+PUT _data_stream/my-data-stream/_lifecycle
+{
+ "data_retention": "30d" <1>
+}
+----
+// TEST[continued]
+<1> The retention period of this data stream is set to 30 days.
+
+- By setting the global retention via the `data_streams.lifecycle.retention.default` and/or `data_streams.lifecycle.retention.max`
+cluster settings, which can be set via the <>. For example:
++
+[source,console]
+--------------------------------------------------
+PUT /_cluster/settings
+{
+ "persistent" : {
+ "data_streams.lifecycle.retention.default" : "7d",
+ "data_streams.lifecycle.retention.max" : "90d"
+ }
+}
+--------------------------------------------------
+// TEST[continued]
+
+[discrete]
+[[effective-retention-calculation]]
+==== How is the effective retention calculated?
+
+The effective retention is calculated in the following way:
+
+- The `effective_retention` is the `default_retention`, when `default_retention` is defined and the data stream does not
+have `data_retention`.
+- The `effective_retention` is the `data_retention`, when `data_retention` is defined and, if `max_retention` is also
+defined, `data_retention` is less than `max_retention`.
+- The `effective_retention` is the `max_retention`, when `max_retention` is defined and the data stream either has no
+`data_retention` or its `data_retention` is greater than `max_retention`.
+
+The above is demonstrated in the examples below:
+
+|===
+|`default_retention` |`max_retention` |`data_retention` |`effective_retention` |Retention determined by
+
+|Not set |Not set |Not set |Infinite |N/A
+|Not relevant |12 months |**30 days** |30 days |`data_retention`
+|Not relevant |Not set |**30 days** |30 days |`data_retention`
+|**30 days** |12 months |Not set |30 days |`default_retention`
+|**30 days** |30 days |Not set |30 days |`default_retention`
+|Not relevant |**30 days** |12 months |30 days |`max_retention`
+|Not set |**30 days** |Not set |30 days |`max_retention`
+|===
+
+Considering our example, if we retrieve the lifecycle of `my-data-stream`:
+[source,console]
+----
+GET _data_stream/my-data-stream/_lifecycle
+----
+// TEST[continued]
+
+We see that it remains the same as what the user configured:
+[source,console-result]
+----
+{
+ "data_streams": [
+ {
+ "name": "my-data-stream",
+ "lifecycle": {
+ "enabled": true,
+ "data_retention": "30d",
+ "effective_retention": "30d",
+ "retention_determined_by": "data_stream_configuration"
+ }
+ }
+ ]
+}
+----
+
+[discrete]
+[[effective-retention-application]]
+==== How is the effective retention applied?
+
+Retention is applied to the remaining backing indices of a data stream as the last step of
+<>. Data stream lifecycle will retrieve the backing indices
+whose `generation_time` is longer than the effective retention period and delete them. The `generation_time` is only
+applicable to rolled over backing indices and it is either the time since the backing index got rolled over, or the time
+optionally configured in the <> setting.
+
+IMPORTANT: We use the `generation_time` instead of the creation time because this ensures that all data in the backing
+index have passed the retention period. As a result, the retention period is not the exact time data gets deleted, but
+the minimum time data will be stored.
diff --git a/docs/reference/data-streams/lifecycle/tutorial-manage-new-data-stream.asciidoc b/docs/reference/data-streams/lifecycle/tutorial-manage-new-data-stream.asciidoc
index c34340a096046..01d51cdde3167 100644
--- a/docs/reference/data-streams/lifecycle/tutorial-manage-new-data-stream.asciidoc
+++ b/docs/reference/data-streams/lifecycle/tutorial-manage-new-data-stream.asciidoc
@@ -91,10 +91,12 @@ The result will look like this:
{
"data_streams": [
{
- "name": "my-data-stream",<1>
+ "name": "my-data-stream", <1>
"lifecycle": {
- "enabled": true, <2>
- "data_retention": "7d" <3>
+ "enabled": true, <2>
+ "data_retention": "7d", <3>
+ "effective_retention": "7d", <4>
+ "retention_determined_by": "data_stream_configuration"
}
}
]
@@ -102,8 +104,9 @@ The result will look like this:
--------------------------------------------------
<1> The name of your data stream.
<2> Shows if the data stream lifecycle is enabled for this data stream.
-<3> The retention period of the data indexed in this data stream, this means that the data in this data stream will
-be kept at least for 7 days. After that {es} can delete it at its own discretion.
+<3> The retention period of the data indexed in this data stream, as configured by the user.
+<4> The retention period that will be applied by the data stream lifecycle. This means that the data in this data stream will
+ be kept for at least 7 days. After that, {es} can delete it at its own discretion.
If you want to see more information about how the data stream lifecycle is applied on individual backing indices use the
<>:
diff --git a/docs/reference/data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc b/docs/reference/data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc
index 5b2e2a1ec70a2..a2c12466b7f2b 100644
--- a/docs/reference/data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc
+++ b/docs/reference/data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc
@@ -1,14 +1,14 @@
[role="xpack"]
[[tutorial-migrate-data-stream-from-ilm-to-dsl]]
-=== Tutorial: Migrate ILM managed data stream to data stream lifecycle
+=== Tutorial: Migrate ILM managed data stream to data stream lifecycle
-In this tutorial we'll look at migrating an existing data stream from Index Lifecycle Management ({ilm-init}) to
-data stream lifecycle. The existing {ilm-init} managed backing indices will continue
+In this tutorial we'll look at migrating an existing data stream from <> to
+<>. The existing {ilm-init} managed backing indices will continue
to be managed by {ilm-init} until they age out and get deleted by {ilm-init}; however,
-the new backing indices will be managed by data stream lifecycle.
-This way, a data stream is gradually migrated away from being managed by {ilm-init} to
+the new backing indices will be managed by data stream lifecycle.
+This way, a data stream is gradually migrated away from being managed by {ilm-init} to
being managed by data stream lifecycle. As we'll see, {ilm-init} and data stream lifecycle
-can co-manage a data stream; however, an index can only be managed by one system at
+can co-manage a data stream; however, an index can only be managed by one system at
a time.
[discrete]
@@ -17,7 +17,7 @@ a time.
To migrate a data stream from {ilm-init} to data stream lifecycle we'll have to execute
two steps:
-1. Update the index template that's backing the data stream to set <>
+1. Update the index template that's backing the data stream to set <>
to `false`, and to configure data stream lifecycle.
2. Configure the data stream lifecycle for the _existing_ data stream using
the <>.
@@ -174,8 +174,8 @@ in the index template).
To migrate the `dsl-data-stream` to data stream lifecycle we'll have to execute
two steps:
-1. Update the index template that's backing the data stream to set <>
-to `false`, and to configure data stream lifecycle.
+1. Update the index template that's backing the data stream to set <>
+to `false`, and to configure data stream lifecycle.
2. Configure the data stream lifecycle for the _existing_ `dsl-data-stream` using
the <>.
@@ -209,9 +209,9 @@ PUT _index_template/dsl-data-stream-template
// TEST[continued]
<1> The `prefer_ilm` setting will now be configured on the **new** backing indices
-(created by rolling over the data stream) such that {ilm-init} does _not_ take
+(created by rolling over the data stream) such that {ilm-init} does _not_ take
precedence over data stream lifecycle.
-<2> We're configuring the data stream lifecycle so _new_ data streams will be
+<2> We're configuring the data stream lifecycle so _new_ data streams will be
managed by data stream lifecycle.
We've now made sure that new data streams will be managed by data stream lifecycle.
@@ -227,7 +227,7 @@ PUT _data_stream/dsl-data-stream/_lifecycle
----
// TEST[continued]
-We can inspect the data stream to check that the next generation will indeed be
+We can inspect the data stream to check that the next generation will indeed be
managed by data stream lifecycle:
[source,console]
@@ -266,7 +266,9 @@ GET _data_stream/dsl-data-stream
"template": "dsl-data-stream-template",
"lifecycle": {
"enabled": true,
- "data_retention": "7d"
+ "data_retention": "7d",
+ "effective_retention": "7d",
+ "retention_determined_by": "data_stream_configuration"
},
"ilm_policy": "pre-dsl-ilm-policy",
"next_generation_managed_by": "Data stream lifecycle", <3>
@@ -292,7 +294,7 @@ GET _data_stream/dsl-data-stream
<4> The `prefer_ilm` setting value we configured in the index template is reflected
and will be configured accordingly for new backing indices.
-We'll now rollover the data stream to see the new generation index being managed by
+We'll now rollover the data stream to see the new generation index being managed by
data stream lifecycle:
[source,console]
@@ -344,7 +346,9 @@ GET _data_stream/dsl-data-stream
"template": "dsl-data-stream-template",
"lifecycle": {
"enabled": true,
- "data_retention": "7d"
+ "data_retention": "7d",
+ "effective_retention": "7d",
+ "retention_determined_by": "data_stream_configuration"
},
"ilm_policy": "pre-dsl-ilm-policy",
"next_generation_managed_by": "Data stream lifecycle",
@@ -375,9 +379,9 @@ in the index template
[discrete]
[[migrate-from-dsl-to-ilm]]
==== Migrate data stream back to ILM
-We can easily change this data stream to be managed by {ilm-init} because we didn't remove
-the {ilm-init} policy when we <>.
+We can easily change this data stream to be managed by {ilm-init} because we didn't remove
+the {ilm-init} policy when we <>.
We can achieve this in two ways:
diff --git a/docs/reference/data-streams/modify-data-streams-api.asciidoc b/docs/reference/data-streams/modify-data-streams-api.asciidoc
index f05e76e67c32f..2da869083df22 100644
--- a/docs/reference/data-streams/modify-data-streams-api.asciidoc
+++ b/docs/reference/data-streams/modify-data-streams-api.asciidoc
@@ -4,7 +4,7 @@
Modify data streams
++++
-Performs one or more data stream modification actions in a single atomic
+Performs one or more <> modification actions in a single atomic
operation.
[source,console]
diff --git a/docs/reference/data-streams/promote-data-stream-api.asciidoc b/docs/reference/data-streams/promote-data-stream-api.asciidoc
index 281e9b549abcb..111c7a2256f8a 100644
--- a/docs/reference/data-streams/promote-data-stream-api.asciidoc
+++ b/docs/reference/data-streams/promote-data-stream-api.asciidoc
@@ -5,7 +5,7 @@
Promote data stream
++++
-The purpose of the promote data stream api is to turn
+The purpose of the promote <> API is to turn
a data stream that is replicated by CCR into a regular
data stream.
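
For example, the following request promotes a data stream (the stream name is a placeholder):

[source,console]
----
POST /_data_stream/_promote/my-data-stream
----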
diff --git a/docs/reference/data-streams/tsds-reindex.asciidoc b/docs/reference/data-streams/tsds-reindex.asciidoc
index ea4ba16df5c4a..9d6594db4e779 100644
--- a/docs/reference/data-streams/tsds-reindex.asciidoc
+++ b/docs/reference/data-streams/tsds-reindex.asciidoc
@@ -9,7 +9,7 @@
[[tsds-reindex-intro]]
==== Introduction
-With reindexing, you can copy documents from an old time-series data stream (TSDS) to a new one. Data streams support
+With reindexing, you can copy documents from an old <> to a new one. Data streams support
reindexing in general, with a few <>. Still, time-series data streams
introduce additional challenges due to tight control on the accepted timestamp range for each backing index they
contain. Direct use of the reindex API would likely error out due to attempting to insert documents with timestamps that are
diff --git a/docs/reference/data-streams/tsds.asciidoc b/docs/reference/data-streams/tsds.asciidoc
index de89fa1ca3f31..01573658c33d0 100644
--- a/docs/reference/data-streams/tsds.asciidoc
+++ b/docs/reference/data-streams/tsds.asciidoc
@@ -107,6 +107,7 @@ parameter:
* <>
* <>
* <>
+* <>
For a flattened field, use the `time_series_dimensions` parameter to configure an array of fields as dimensions. For details refer to <>.
diff --git a/docs/reference/eql/eql-apis.asciidoc b/docs/reference/eql/eql-apis.asciidoc
index d3f591ccfe6c1..e8cc2b21492ae 100644
--- a/docs/reference/eql/eql-apis.asciidoc
+++ b/docs/reference/eql/eql-apis.asciidoc
@@ -1,7 +1,7 @@
[[eql-apis]]
== EQL APIs
-Event Query Language (EQL) is a query language for event-based time series data,
+<> is a query language for event-based time series data,
such as logs, metrics, and traces. For an overview of EQL and related tutorials,
see <>.
diff --git a/docs/reference/esql/esql-across-clusters.asciidoc b/docs/reference/esql/esql-across-clusters.asciidoc
index 8bc1e2a83fc19..d13b3db1c73ea 100644
--- a/docs/reference/esql/esql-across-clusters.asciidoc
+++ b/docs/reference/esql/esql-across-clusters.asciidoc
@@ -54,11 +54,6 @@ Refer to <> for prerequisi
[[esql-ccs-security-model-api-key]]
===== API key authentication
-[NOTE]
-====
-`ENRICH` is *not supported* in this version when using {esql} with the API key based security model.
-====
-
The following information pertains to using {esql} across clusters with the <>. You'll need to follow the steps on that page for the *full setup instructions*. This page only contains additional information specific to {esql}.
API key based cross-cluster search (CCS) enables more granular control over allowed actions between clusters.
@@ -71,6 +66,7 @@ You will need to:
Using {esql} with the API key based security model requires some additional permissions that may not be needed when using the traditional query DSL based search.
The following example API call creates a role that can query remote indices using {esql} when using the API key based security model.
+The final privilege, `remote_cluster`, is required to allow remote enrich operations.
[source,console]
----
@@ -89,7 +85,17 @@ POST /_security/role/remote1
"privileges": [ "read","read_cross_cluster" ], <4>
"clusters" : ["my_remote_cluster"] <5>
}
- ]
+ ],
+ "remote_cluster": [ <6>
+ {
+ "privileges": [
+ "monitor_enrich"
+ ],
+ "clusters": [
+ "my_remote_cluster"
+ ]
+ }
+ ]
}
----
@@ -100,6 +106,7 @@ POST /_security/role/remote1
<5> The remote clusters to which these privileges apply.
This remote cluster must be configured with a <> and connected to the remote cluster before the remote index can be queried.
Verify connection using the <> API.
+<6> Required to allow remote enrichment. Without this, the user cannot read from the `.enrich` indices on the remote cluster. The `remote_cluster` security privilege was introduced in version *8.15.0*.
You will then need a user or API key with the permissions you created above. The following example API call creates a user with the `remote1` role.
@@ -114,6 +121,11 @@ POST /_security/user/remote_user
Remember that all cross-cluster requests from the local cluster are bound by the cross cluster API key’s privileges, which are controlled by the remote cluster's administrator.
+[TIP]
+====
+Cross cluster API keys created in versions prior to 8.15.0 will need to be replaced or updated to add the new permissions required for {esql} with ENRICH.
+====
+
[discrete]
[[ccq-remote-cluster-setup]]
==== Remote cluster setup
@@ -174,9 +186,11 @@ clusters, aiming to minimize computation or inter-cluster data transfer. Ensurin
the policy exists with consistent data on both the local cluster and the remote
clusters is critical for ES|QL to produce a consistent query result.
-[NOTE]
+[TIP]
====
-Enrich across clusters is *not supported* in this version when using {esql} with the <>.
+Enrich in {esql} across clusters using the API key based security model was introduced in version *8.15.0*.
+Cross cluster API keys created in versions prior to 8.15.0 will need to be replaced or updated to use the new required permissions.
+Refer to the example in the <> section.
====
In the following example, the enrich with `hosts` policy can be executed on
diff --git a/docs/reference/esql/esql-apis.asciidoc b/docs/reference/esql/esql-apis.asciidoc
index 686a71506bc14..8586cd1ae6bce 100644
--- a/docs/reference/esql/esql-apis.asciidoc
+++ b/docs/reference/esql/esql-apis.asciidoc
@@ -1,7 +1,7 @@
[[esql-apis]]
== {esql} APIs
-The {es} Query Language ({esql}) provides a powerful way to filter, transform,
+The <> provides a powerful way to filter, transform,
and analyze data stored in {es}, and in the future in other runtimes. For an
overview of {esql} and related tutorials, see <>.
diff --git a/docs/reference/esql/esql-async-query-delete-api.asciidoc b/docs/reference/esql/esql-async-query-delete-api.asciidoc
index 90f8c06b9124a..5cad566f7f9c0 100644
--- a/docs/reference/esql/esql-async-query-delete-api.asciidoc
+++ b/docs/reference/esql/esql-async-query-delete-api.asciidoc
@@ -4,7 +4,7 @@
{esql} async query delete API
++++
-The {esql} async query delete API is used to manually delete an async query
+The <> async query delete API is used to manually delete an async query
by ID. If the query is still running, the query will be cancelled. Otherwise,
the stored results are deleted.
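
For example, the following request deletes an async query by ID (the ID shown is a placeholder for a real async query ID):

[source,console]
----
DELETE /_query/async/FmNJRUZ1YWZCU3dHY1BIOUhaenVSRkEaaXFlZ3h4c1RTWFNocDdnY2FSaERnUTozNDE=
----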
diff --git a/docs/reference/esql/esql-commands.asciidoc b/docs/reference/esql/esql-commands.asciidoc
index bed79299b1cc1..235113ac1394a 100644
--- a/docs/reference/esql/esql-commands.asciidoc
+++ b/docs/reference/esql/esql-commands.asciidoc
@@ -37,6 +37,9 @@ image::images/esql/processing-command.svg[A processing command changing an input
* <>
* <>
* <>
+ifeval::["{release-state}"=="unreleased"]
+* experimental:[] <>
+endif::[]
* <>
* <>
ifeval::["{release-state}"=="unreleased"]
@@ -59,6 +62,9 @@ include::processing-commands/drop.asciidoc[]
include::processing-commands/enrich.asciidoc[]
include::processing-commands/eval.asciidoc[]
include::processing-commands/grok.asciidoc[]
+ifeval::["{release-state}"=="unreleased"]
+include::processing-commands/inlinestats.asciidoc[]
+endif::[]
include::processing-commands/keep.asciidoc[]
include::processing-commands/limit.asciidoc[]
ifeval::["{release-state}"=="unreleased"]
diff --git a/docs/reference/esql/esql-multi-index.asciidoc b/docs/reference/esql/esql-multi-index.asciidoc
index 41ff6a27417b1..25874a132d93d 100644
--- a/docs/reference/esql/esql-multi-index.asciidoc
+++ b/docs/reference/esql/esql-multi-index.asciidoc
@@ -97,13 +97,12 @@ In addition, if the query refers to this unsupported field directly, the query f
[source.merge.styled,esql]
----
FROM events_*
-| KEEP @timestamp, client_ip, event_duration, message
-| SORT @timestamp DESC
+| SORT client_ip DESC
----
[source,bash]
----
-Cannot use field [client_ip] due to ambiguities being mapped as
+Cannot use field [client_ip] due to ambiguities being mapped as
[2] incompatible types:
[ip] in [events_ip],
[keyword] in [events_keyword]
@@ -113,12 +112,13 @@ Cannot use field [client_ip] due to ambiguities being mapped as
[[esql-multi-index-union-types]]
=== Union types
+experimental::[]
+
{esql} has a way to handle <>. When the same field is mapped to multiple types in multiple indices,
the type of the field is understood to be a _union_ of the various types in the index mappings.
As seen in the preceding examples, this _union type_ cannot be used in the results,
-and cannot be referred to by the query
--- except when it's passed to a type conversion function that accepts all the types in the _union_ and converts the field
-to a single type. {esql} offers a suite of <> to achieve this.
+and cannot be referred to by the query -- except in `KEEP`, `DROP` or when it's passed to a type conversion function that accepts all the types in
+the _union_ and converts the field to a single type. {esql} offers a suite of <> to achieve this.
In the above examples, the query can use a command like `EVAL client_ip = TO_IP(client_ip)` to resolve
the union of `ip` and `keyword` to just `ip`.
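
A minimal sketch of that conversion, using the index pattern and fields from the preceding examples:

[source,esql]
----
FROM events_*
| EVAL client_ip = TO_IP(client_ip) // resolves the union of ip and keyword to ip
| KEEP @timestamp, client_ip, event_duration, message
----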
diff --git a/docs/reference/esql/esql-query-api.asciidoc b/docs/reference/esql/esql-query-api.asciidoc
index e8cfa03e3ee88..c8c735b73d2a4 100644
--- a/docs/reference/esql/esql-query-api.asciidoc
+++ b/docs/reference/esql/esql-query-api.asciidoc
@@ -102,7 +102,7 @@ Column `name` and `type` for each column returned in `values`. Each object is a
Column `name` and `type` for each queried column. Each object is a single column. This is only
returned if `drop_null_columns` is sent with the request.
-`rows`::
+`values`::
(array of arrays)
Values for the search results.
diff --git a/docs/reference/esql/esql-rest.asciidoc b/docs/reference/esql/esql-rest.asciidoc
index 5b90e96d7a734..2c8c5e81e273d 100644
--- a/docs/reference/esql/esql-rest.asciidoc
+++ b/docs/reference/esql/esql-rest.asciidoc
@@ -278,6 +278,47 @@ POST /_query
----
// TEST[setup:library]
+The parameters can be named parameters or positional parameters.
+
+Named parameters use question mark placeholders (`?`) followed by a parameter name.
+
+[source,console]
+----
+POST /_query
+{
+ "query": """
+ FROM library
+ | EVAL year = DATE_EXTRACT("year", release_date)
+ | WHERE page_count > ?page_count AND author == ?author
+ | STATS count = COUNT(*) by year
+ | WHERE count > ?count
+ | LIMIT 5
+ """,
+ "params": [{"page_count" : 300}, {"author" : "Frank Herbert"}, {"count" : 0}]
+}
+----
+// TEST[setup:library]
+
+Positional parameters use question mark placeholders (`?`) followed by an
+integer.
+
+[source,console]
+----
+POST /_query
+{
+ "query": """
+ FROM library
+ | EVAL year = DATE_EXTRACT("year", release_date)
+ | WHERE page_count > ?1 AND author == ?2
+ | STATS count = COUNT(*) by year
+ | WHERE count > ?3
+ | LIMIT 5
+ """,
+ "params": [300, "Frank Herbert", 0]
+}
+----
+// TEST[setup:library]
+
[discrete]
[[esql-rest-async-query]]
==== Running an async {esql} query
diff --git a/docs/reference/esql/functions/aggregation-functions.asciidoc b/docs/reference/esql/functions/aggregation-functions.asciidoc
index 4c248704b6385..7cdc42ea6cbf9 100644
--- a/docs/reference/esql/functions/aggregation-functions.asciidoc
+++ b/docs/reference/esql/functions/aggregation-functions.asciidoc
@@ -9,30 +9,30 @@ The <> command supports these aggregate functions:
// tag::agg_list[]
* <>
-* <>
-* <>
+* <>
+* <>
* <>
-* <>
-* <>
+* <>
+* <>
* <>
* <>
-* experimental:[] <>
-* <>
+* experimental:[] <>
+* <>
* <>
-* <>
-* experimental:[] <>
+* <>
+* experimental:[] <>
// end::agg_list[]
-include::count.asciidoc[]
-include::count-distinct.asciidoc[]
-include::median.asciidoc[]
-include::median-absolute-deviation.asciidoc[]
-include::st_centroid_agg.asciidoc[]
-include::sum.asciidoc[]
include::layout/avg.asciidoc[]
+include::layout/count.asciidoc[]
+include::layout/count_distinct.asciidoc[]
include::layout/max.asciidoc[]
+include::layout/median.asciidoc[]
+include::layout/median_absolute_deviation.asciidoc[]
include::layout/min.asciidoc[]
include::layout/percentile.asciidoc[]
+include::layout/st_centroid_agg.asciidoc[]
+include::layout/sum.asciidoc[]
include::layout/top.asciidoc[]
-include::values.asciidoc[]
-include::weighted-avg.asciidoc[]
+include::layout/values.asciidoc[]
+include::layout/weighted_avg.asciidoc[]
diff --git a/docs/reference/esql/functions/appendix/count_distinct.asciidoc b/docs/reference/esql/functions/appendix/count_distinct.asciidoc
new file mode 100644
index 0000000000000..065065cf34e06
--- /dev/null
+++ b/docs/reference/esql/functions/appendix/count_distinct.asciidoc
@@ -0,0 +1,25 @@
+// This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it.
+
+[discrete]
+[[esql-agg-count-distinct-approximate]]
+==== Counts are approximate
+
+Computing exact counts requires loading values into a set and returning its
+size. This doesn't scale when working on high-cardinality sets and/or large
+values as the required memory usage and the need to communicate those
+per-shard sets between nodes would utilize too many resources of the cluster.
+
+This `COUNT_DISTINCT` function is based on the
+https://static.googleusercontent.com/media/research.google.com/fr//pubs/archive/40671.pdf[HyperLogLog++]
+algorithm, which counts based on the hashes of the values with some interesting
+properties:
+
+include::../../../aggregations/metrics/cardinality-aggregation.asciidoc[tag=explanation]
+
+The `COUNT_DISTINCT` function takes an optional second parameter to configure
+the precision threshold. The `precision_threshold` option allows you to trade memory
+for accuracy, and defines a unique count below which counts are expected to be
+close to accurate. Above this value, counts might become a bit more fuzzy. The
+maximum supported value is 40000; thresholds above this number will have the
+same effect as a threshold of 40000. The default value is `3000`.
+
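+A hypothetical query using the optional threshold (the index and field names here are illustrative only):
+
+[source,esql]
+----
+FROM hosts // assumed index name
+| STATS distinct_ips = COUNT_DISTINCT(ip0, 40000) // assumed field; 40000 is the maximum threshold
+----
+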
diff --git a/docs/reference/esql/functions/appendix/median.asciidoc b/docs/reference/esql/functions/appendix/median.asciidoc
new file mode 100644
index 0000000000000..929a4ed0dae2c
--- /dev/null
+++ b/docs/reference/esql/functions/appendix/median.asciidoc
@@ -0,0 +1,7 @@
+// This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it.
+
+[WARNING]
+====
+`MEDIAN` is also {wikipedia}/Nondeterministic_algorithm[non-deterministic].
+This means you can get slightly different results using the same data.
+====
diff --git a/docs/reference/esql/functions/appendix/median_absolute_deviation.asciidoc b/docs/reference/esql/functions/appendix/median_absolute_deviation.asciidoc
new file mode 100644
index 0000000000000..a4f96c800946b
--- /dev/null
+++ b/docs/reference/esql/functions/appendix/median_absolute_deviation.asciidoc
@@ -0,0 +1,7 @@
+// This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it.
+
+[WARNING]
+====
+`MEDIAN_ABSOLUTE_DEVIATION` is also {wikipedia}/Nondeterministic_algorithm[non-deterministic].
+This means you can get slightly different results using the same data.
+====
diff --git a/docs/reference/esql/functions/appendix/values.asciidoc b/docs/reference/esql/functions/appendix/values.asciidoc
new file mode 100644
index 0000000000000..ec3cfff2db6a6
--- /dev/null
+++ b/docs/reference/esql/functions/appendix/values.asciidoc
@@ -0,0 +1,10 @@
+// This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it.
+
+[WARNING]
+====
+This can use a significant amount of memory and ES|QL doesn't yet
+grow aggregations beyond memory. So this aggregation will work until
+it is used to collect more values than can fit into memory. Once it
+collects too many values, it will fail the query with
+a <>.
+====
diff --git a/docs/reference/esql/functions/case.asciidoc b/docs/reference/esql/functions/case.asciidoc
deleted file mode 100644
index b5fda636135b2..0000000000000
--- a/docs/reference/esql/functions/case.asciidoc
+++ /dev/null
@@ -1,70 +0,0 @@
-[discrete]
-[[esql-case]]
-=== `CASE`
-
-*Syntax*
-
-[source,esql]
-----
-CASE(condition1, value1[, ..., conditionN, valueN][, default_value])
-----
-
-*Parameters*
-
-`conditionX`::
-A condition.
-
-`valueX`::
-The value that's returned when the corresponding condition is the first to
-evaluate to `true`.
-
-`default_value`::
-The default value that's is returned when no condition matches.
-
-*Description*
-
-Accepts pairs of conditions and values. The function returns the value that
-belongs to the first condition that evaluates to `true`.
-
-If the number of arguments is odd, the last argument is the default value which
-is returned when no condition matches. If the number of arguments is even, and
-no condition matches, the function returns `null`.
-
-*Example*
-
-Determine whether employees are monolingual, bilingual, or polyglot:
-
-[source,esql]
-[source.merge.styled,esql]
-----
-include::{esql-specs}/docs.csv-spec[tag=case]
-----
-[%header.monospaced.styled,format=dsv,separator=|]
-|===
-include::{esql-specs}/docs.csv-spec[tag=case-result]
-|===
-
-Calculate the total connection success rate based on log messages:
-
-[source,esql]
-[source.merge.styled,esql]
-----
-include::{esql-specs}/conditional.csv-spec[tag=docsCaseSuccessRate]
-----
-[%header.monospaced.styled,format=dsv,separator=|]
-|===
-include::{esql-specs}/conditional.csv-spec[tag=docsCaseSuccessRate-result]
-|===
-
-Calculate an hourly error rate as a percentage of the total number of log
-messages:
-
-[source,esql]
-[source.merge.styled,esql]
-----
-include::{esql-specs}/conditional.csv-spec[tag=docsCaseHourlyErrorRate]
-----
-[%header.monospaced.styled,format=dsv,separator=|]
-|===
-include::{esql-specs}/conditional.csv-spec[tag=docsCaseHourlyErrorRate-result]
-|===
diff --git a/docs/reference/esql/functions/count-distinct.asciidoc b/docs/reference/esql/functions/count-distinct.asciidoc
deleted file mode 100644
index a9f30d24e0e83..0000000000000
--- a/docs/reference/esql/functions/count-distinct.asciidoc
+++ /dev/null
@@ -1,85 +0,0 @@
-[discrete]
-[[esql-agg-count-distinct]]
-=== `COUNT_DISTINCT`
-
-*Syntax*
-
-[source,esql]
-----
-COUNT_DISTINCT(expression[, precision_threshold])
-----
-
-*Parameters*
-
-`expression`::
-Expression that outputs the values on which to perform a distinct count.
-
-`precision_threshold`::
-Precision threshold. Refer to <>. The
-maximum supported value is 40000. Thresholds above this number will have the
-same effect as a threshold of 40000. The default value is 3000.
-
-*Description*
-
-Returns the approximate number of distinct values.
-
-*Supported types*
-
-Can take any field type as input.
-
-*Examples*
-
-[source.merge.styled,esql]
-----
-include::{esql-specs}/stats_count_distinct.csv-spec[tag=count-distinct]
-----
-[%header.monospaced.styled,format=dsv,separator=|]
-|===
-include::{esql-specs}/stats_count_distinct.csv-spec[tag=count-distinct-result]
-|===
-
-With the optional second parameter to configure the precision threshold:
-
-[source.merge.styled,esql]
-----
-include::{esql-specs}/stats_count_distinct.csv-spec[tag=count-distinct-precision]
-----
-[%header.monospaced.styled,format=dsv,separator=|]
-|===
-include::{esql-specs}/stats_count_distinct.csv-spec[tag=count-distinct-precision-result]
-|===
-
-The expression can use inline functions. This example splits a string into
-multiple values using the `SPLIT` function and counts the unique values:
-
-[source.merge.styled,esql]
-----
-include::{esql-specs}/stats_count_distinct.csv-spec[tag=docsCountDistinctWithExpression]
-----
-[%header.monospaced.styled,format=dsv,separator=|]
-|===
-include::{esql-specs}/stats_count_distinct.csv-spec[tag=docsCountDistinctWithExpression-result]
-|===
-
-[discrete]
-[[esql-agg-count-distinct-approximate]]
-==== Counts are approximate
-
-Computing exact counts requires loading values into a set and returning its
-size. This doesn't scale when working on high-cardinality sets and/or large
-values as the required memory usage and the need to communicate those
-per-shard sets between nodes would utilize too many resources of the cluster.
-
-This `COUNT_DISTINCT` function is based on the
-https://static.googleusercontent.com/media/research.google.com/fr//pubs/archive/40671.pdf[HyperLogLog++]
-algorithm, which counts based on the hashes of the values with some interesting
-properties:
-
-include::../../aggregations/metrics/cardinality-aggregation.asciidoc[tag=explanation]
-
-The `COUNT_DISTINCT` function takes an optional second parameter to configure
-the precision threshold. The precision_threshold options allows to trade memory
-for accuracy, and defines a unique count below which counts are expected to be
-close to accurate. Above this value, counts might become a bit more fuzzy. The
-maximum supported value is 40000, thresholds above this number will have the
-same effect as a threshold of 40000. The default value is `3000`.
\ No newline at end of file
diff --git a/docs/reference/esql/functions/count.asciidoc b/docs/reference/esql/functions/count.asciidoc
deleted file mode 100644
index 66cfe76350cdd..0000000000000
--- a/docs/reference/esql/functions/count.asciidoc
+++ /dev/null
@@ -1,83 +0,0 @@
-[discrete]
-[[esql-agg-count]]
-=== `COUNT`
-
-*Syntax*
-
-[source,esql]
-----
-COUNT([expression])
-----
-
-*Parameters*
-
-`expression`::
-Expression that outputs values to be counted.
-If omitted, equivalent to `COUNT(*)` (the number of rows).
-
-*Description*
-
-Returns the total number (count) of input values.
-
-*Supported types*
-
-Can take any field type as input.
-
-*Examples*
-
-[source.merge.styled,esql]
-----
-include::{esql-specs}/stats.csv-spec[tag=count]
-----
-[%header.monospaced.styled,format=dsv,separator=|]
-|===
-include::{esql-specs}/stats.csv-spec[tag=count-result]
-|===
-
-To count the number of rows, use `COUNT()` or `COUNT(*)`:
-
-[source.merge.styled,esql]
-----
-include::{esql-specs}/docs.csv-spec[tag=countAll]
-----
-[%header.monospaced.styled,format=dsv,separator=|]
-|===
-include::{esql-specs}/docs.csv-spec[tag=countAll-result]
-|===
-
-The expression can use inline functions. This example splits a string into
-multiple values using the `SPLIT` function and counts the values:
-
-[source.merge.styled,esql]
-----
-include::{esql-specs}/stats.csv-spec[tag=docsCountWithExpression]
-----
-[%header.monospaced.styled,format=dsv,separator=|]
-|===
-include::{esql-specs}/stats.csv-spec[tag=docsCountWithExpression-result]
-|===
-
-[[esql-agg-count-or-null]]
-To count the number of times an expression returns `TRUE` use
-a <> command to remove rows that shouldn't be included:
-
-[source.merge.styled,esql]
-----
-include::{esql-specs}/stats.csv-spec[tag=count-where]
-----
-[%header.monospaced.styled,format=dsv,separator=|]
-|===
-include::{esql-specs}/stats.csv-spec[tag=count-where-result]
-|===
-
-To count the same stream of data based on two different expressions
-use the pattern `COUNT( OR NULL)`:
-
-[source.merge.styled,esql]
-----
-include::{esql-specs}/stats.csv-spec[tag=count-or-null]
-----
-[%header.monospaced.styled,format=dsv,separator=|]
-|===
-include::{esql-specs}/stats.csv-spec[tag=count-or-null-result]
-|===
diff --git a/docs/reference/esql/functions/description/count.asciidoc b/docs/reference/esql/functions/description/count.asciidoc
new file mode 100644
index 0000000000000..ee806d65a8ea3
--- /dev/null
+++ b/docs/reference/esql/functions/description/count.asciidoc
@@ -0,0 +1,5 @@
+// This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it.
+
+*Description*
+
+Returns the total number (count) of input values.
diff --git a/docs/reference/esql/functions/description/count_distinct.asciidoc b/docs/reference/esql/functions/description/count_distinct.asciidoc
new file mode 100644
index 0000000000000..d10825bb991f5
--- /dev/null
+++ b/docs/reference/esql/functions/description/count_distinct.asciidoc
@@ -0,0 +1,5 @@
+// This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it.
+
+*Description*
+
+Returns the approximate number of distinct values.
diff --git a/docs/reference/esql/functions/description/median.asciidoc b/docs/reference/esql/functions/description/median.asciidoc
new file mode 100644
index 0000000000000..ff3b7b32ed15e
--- /dev/null
+++ b/docs/reference/esql/functions/description/median.asciidoc
@@ -0,0 +1,7 @@
+// This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it.
+
+*Description*
+
+The value that is greater than half of all values and less than half of all values, also known as the 50% <>.
+
+NOTE: Like <>, `MEDIAN` is <>.
diff --git a/docs/reference/esql/functions/description/median_absolute_deviation.asciidoc b/docs/reference/esql/functions/description/median_absolute_deviation.asciidoc
new file mode 100644
index 0000000000000..1a363920dd422
--- /dev/null
+++ b/docs/reference/esql/functions/description/median_absolute_deviation.asciidoc
@@ -0,0 +1,7 @@
+// This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it.
+
+*Description*
+
+Returns the median absolute deviation, a measure of variability. It is a robust statistic, meaning that it is useful for describing data that may have outliers, or may not be normally distributed. For such data it can be more descriptive than standard deviation. It is calculated as the median of each data point's deviation from the median of the entire sample. That is, for a random variable `X`, the median absolute deviation is `median(|median(X) - X|)`.
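+For illustration: for the sample `[1, 2, 4, 9]` the median is `3`, the absolute deviations from it are `[2, 1, 1, 6]`, and their median, the MAD, is `1.5`.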
+
+NOTE: Like <>, `MEDIAN_ABSOLUTE_DEVIATION` is <>.
diff --git a/docs/reference/esql/functions/description/mv_percentile.asciidoc b/docs/reference/esql/functions/description/mv_percentile.asciidoc
new file mode 100644
index 0000000000000..3e731f6525cec
--- /dev/null
+++ b/docs/reference/esql/functions/description/mv_percentile.asciidoc
@@ -0,0 +1,5 @@
+// This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it.
+
+*Description*
+
+Converts a multivalued field into a single-valued field containing the value at which a certain percentage of observed values occur.
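+
+For instance, a minimal sketch using row literals (the values are chosen
+purely for illustration):
+
+[source,esql]
+----
+ROW values = [5, 5, 10, 12, 5000]
+| EVAL p50 = MV_PERCENTILE(values, 50)
+----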
diff --git a/docs/reference/esql/functions/description/mv_pseries_weighted_sum.asciidoc b/docs/reference/esql/functions/description/mv_pseries_weighted_sum.asciidoc
new file mode 100644
index 0000000000000..d464689f40a01
--- /dev/null
+++ b/docs/reference/esql/functions/description/mv_pseries_weighted_sum.asciidoc
@@ -0,0 +1,5 @@
+// This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it.
+
+*Description*
+
+Converts a multivalued expression into a single-valued column by multiplying every element in the input list by its corresponding term in a P-Series and computing the sum.
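+
+In other words, assuming 1-based element positions, the result is the sum of
+`v_i / i^p` for input values `v_i` and parameter `p`. A minimal sketch using
+row literals (values chosen purely for illustration):
+
+[source,esql]
+----
+ROW scores = [70.0, 45.0, 21.0]
+| EVAL total = MV_PSERIES_WEIGHTED_SUM(scores, 1.5)
+----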
diff --git a/docs/reference/esql/functions/description/st_centroid_agg.asciidoc b/docs/reference/esql/functions/description/st_centroid_agg.asciidoc
new file mode 100644
index 0000000000000..740accf02c33f
--- /dev/null
+++ b/docs/reference/esql/functions/description/st_centroid_agg.asciidoc
@@ -0,0 +1,5 @@
+// This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it.
+
+*Description*
+
+Calculate the spatial centroid over a field with spatial point geometry type.
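+
+A minimal sketch, assuming a hypothetical `airports` index with a `location`
+field of a spatial point type:
+
+[source,esql]
+----
+FROM airports
+| STATS centroid = ST_CENTROID_AGG(location)
+----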
diff --git a/docs/reference/esql/functions/description/sum.asciidoc b/docs/reference/esql/functions/description/sum.asciidoc
new file mode 100644
index 0000000000000..e3956567b8656
--- /dev/null
+++ b/docs/reference/esql/functions/description/sum.asciidoc
@@ -0,0 +1,5 @@
+// This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it.
+
+*Description*
+
+The sum of a numeric expression.
diff --git a/docs/reference/esql/functions/description/to_datetime.asciidoc b/docs/reference/esql/functions/description/to_datetime.asciidoc
index b37bd6b22ac2f..91cbfa0b5fe1e 100644
--- a/docs/reference/esql/functions/description/to_datetime.asciidoc
+++ b/docs/reference/esql/functions/description/to_datetime.asciidoc
@@ -3,3 +3,5 @@
*Description*
Converts an input value to a date value. A string will only be successfully converted if it's respecting the format `yyyy-MM-dd'T'HH:mm:ss.SSS'Z'`. To convert dates in other formats, use <>.
+
+NOTE: When converting from nanosecond resolution to millisecond resolution with this function, the nanosecond date is truncated, not rounded.
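+
+For instance, a string in exactly that format converts cleanly (a sketch
+using a row literal):
+
+[source,esql]
+----
+ROW s = "2024-01-23T12:15:00.000Z"
+| EVAL d = TO_DATETIME(s)
+----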
diff --git a/docs/reference/esql/functions/description/values.asciidoc b/docs/reference/esql/functions/description/values.asciidoc
new file mode 100644
index 0000000000000..b3cebcce955f0
--- /dev/null
+++ b/docs/reference/esql/functions/description/values.asciidoc
@@ -0,0 +1,5 @@
+// This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it.
+
+*Description*
+
+Returns all values in a group as a multivalued field. The order of the returned values isn't guaranteed. If you need the values returned in order, use <>.
diff --git a/docs/reference/esql/functions/description/weighted_avg.asciidoc b/docs/reference/esql/functions/description/weighted_avg.asciidoc
new file mode 100644
index 0000000000000..a15d5d4ea171d
--- /dev/null
+++ b/docs/reference/esql/functions/description/weighted_avg.asciidoc
@@ -0,0 +1,5 @@
+// This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it.
+
+*Description*
+
+The weighted average of a numeric expression.
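+
+A minimal sketch, assuming a hypothetical `employees` index where `salary` is
+weighted by `height`:
+
+[source,esql]
+----
+FROM employees
+| STATS w_avg = WEIGHTED_AVG(salary, height)
+----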
diff --git a/docs/reference/esql/functions/examples/bucket.asciidoc b/docs/reference/esql/functions/examples/bucket.asciidoc
index e1bba0529d7db..4afea30660339 100644
--- a/docs/reference/esql/functions/examples/bucket.asciidoc
+++ b/docs/reference/esql/functions/examples/bucket.asciidoc
@@ -86,10 +86,6 @@ include::{esql-specs}/bucket.csv-spec[tag=docsBucketNumericWithSpan]
|===
include::{esql-specs}/bucket.csv-spec[tag=docsBucketNumericWithSpan-result]
|===
-
-NOTE: When providing the bucket size as the second parameter, it must be
-of a floating point type.
-
Create hourly buckets for the last 24 hours, and calculate the number of events per hour:
[source.merge.styled,esql]
----
diff --git a/docs/reference/esql/functions/examples/count.asciidoc b/docs/reference/esql/functions/examples/count.asciidoc
new file mode 100644
index 0000000000000..fb696b51e054c
--- /dev/null
+++ b/docs/reference/esql/functions/examples/count.asciidoc
@@ -0,0 +1,49 @@
+// This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it.
+
+*Examples*
+
+[source.merge.styled,esql]
+----
+include::{esql-specs}/stats.csv-spec[tag=count]
+----
+[%header.monospaced.styled,format=dsv,separator=|]
+|===
+include::{esql-specs}/stats.csv-spec[tag=count-result]
+|===
+To count the number of rows, use `COUNT()` or `COUNT(*)`
+[source.merge.styled,esql]
+----
+include::{esql-specs}/docs.csv-spec[tag=countAll]
+----
+[%header.monospaced.styled,format=dsv,separator=|]
+|===
+include::{esql-specs}/docs.csv-spec[tag=countAll-result]
+|===
+The expression can use inline functions. This example splits a string into multiple values using the `SPLIT` function and counts the values
+[source.merge.styled,esql]
+----
+include::{esql-specs}/stats.csv-spec[tag=docsCountWithExpression]
+----
+[%header.monospaced.styled,format=dsv,separator=|]
+|===
+include::{esql-specs}/stats.csv-spec[tag=docsCountWithExpression-result]
+|===
+To count the number of times an expression returns `TRUE`, use a <> command to remove rows that shouldn't be included
+[source.merge.styled,esql]
+----
+include::{esql-specs}/stats.csv-spec[tag=count-where]
+----
+[%header.monospaced.styled,format=dsv,separator=|]
+|===
+include::{esql-specs}/stats.csv-spec[tag=count-where-result]
+|===
+To count the same stream of data based on two different expressions, use the pattern `COUNT( OR NULL)`
+[source.merge.styled,esql]
+----
+include::{esql-specs}/stats.csv-spec[tag=count-or-null]
+----
+[%header.monospaced.styled,format=dsv,separator=|]
+|===
+include::{esql-specs}/stats.csv-spec[tag=count-or-null-result]
+|===
+
diff --git a/docs/reference/esql/functions/examples/count_distinct.asciidoc b/docs/reference/esql/functions/examples/count_distinct.asciidoc
new file mode 100644
index 0000000000000..44968c0652ec0
--- /dev/null
+++ b/docs/reference/esql/functions/examples/count_distinct.asciidoc
@@ -0,0 +1,31 @@
+// This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it.
+
+*Examples*
+
+[source.merge.styled,esql]
+----
+include::{esql-specs}/stats_count_distinct.csv-spec[tag=count-distinct]
+----
+[%header.monospaced.styled,format=dsv,separator=|]
+|===
+include::{esql-specs}/stats_count_distinct.csv-spec[tag=count-distinct-result]
+|===
+With the optional second parameter to configure the precision threshold
+[source.merge.styled,esql]
+----
+include::{esql-specs}/stats_count_distinct.csv-spec[tag=count-distinct-precision]
+----
+[%header.monospaced.styled,format=dsv,separator=|]
+|===
+include::{esql-specs}/stats_count_distinct.csv-spec[tag=count-distinct-precision-result]
+|===
+The expression can use inline functions. This example splits a string into multiple values using the `SPLIT` function and counts the unique values
+[source.merge.styled,esql]
+----
+include::{esql-specs}/stats_count_distinct.csv-spec[tag=docsCountDistinctWithExpression]
+----
+[%header.monospaced.styled,format=dsv,separator=|]
+|===
+include::{esql-specs}/stats_count_distinct.csv-spec[tag=docsCountDistinctWithExpression-result]
+|===
+
diff --git a/docs/reference/esql/functions/examples/median.asciidoc b/docs/reference/esql/functions/examples/median.asciidoc
new file mode 100644
index 0000000000000..cb6248dcff148
--- /dev/null
+++ b/docs/reference/esql/functions/examples/median.asciidoc
@@ -0,0 +1,22 @@
+// This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it.
+
+*Examples*
+
+[source.merge.styled,esql]
+----
+include::{esql-specs}/stats_percentile.csv-spec[tag=median]
+----
+[%header.monospaced.styled,format=dsv,separator=|]
+|===
+include::{esql-specs}/stats_percentile.csv-spec[tag=median-result]
+|===
+The expression can use inline functions. For example, to calculate the median of the maximum values of a multivalued column, first use `MV_MAX` to get the maximum value per row, and use the result with the `MEDIAN` function
+[source.merge.styled,esql]
+----
+include::{esql-specs}/stats_percentile.csv-spec[tag=docsStatsMedianNestedExpression]
+----
+[%header.monospaced.styled,format=dsv,separator=|]
+|===
+include::{esql-specs}/stats_percentile.csv-spec[tag=docsStatsMedianNestedExpression-result]
+|===
+
diff --git a/docs/reference/esql/functions/examples/median_absolute_deviation.asciidoc b/docs/reference/esql/functions/examples/median_absolute_deviation.asciidoc
new file mode 100644
index 0000000000000..20891126c20fb
--- /dev/null
+++ b/docs/reference/esql/functions/examples/median_absolute_deviation.asciidoc
@@ -0,0 +1,22 @@
+// This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it.
+
+*Examples*
+
+[source.merge.styled,esql]
+----
+include::{esql-specs}/stats_percentile.csv-spec[tag=median-absolute-deviation]
+----
+[%header.monospaced.styled,format=dsv,separator=|]
+|===
+include::{esql-specs}/stats_percentile.csv-spec[tag=median-absolute-deviation-result]
+|===
+The expression can use inline functions. For example, to calculate the median absolute deviation of the maximum values of a multivalued column, first use `MV_MAX` to get the maximum value per row, and use the result with the `MEDIAN_ABSOLUTE_DEVIATION` function
+[source.merge.styled,esql]
+----
+include::{esql-specs}/stats_percentile.csv-spec[tag=docsStatsMADNestedExpression]
+----
+[%header.monospaced.styled,format=dsv,separator=|]
+|===
+include::{esql-specs}/stats_percentile.csv-spec[tag=docsStatsMADNestedExpression-result]
+|===
+
diff --git a/docs/reference/esql/functions/examples/mv_percentile.asciidoc b/docs/reference/esql/functions/examples/mv_percentile.asciidoc
new file mode 100644
index 0000000000000..9b20a5bef5e0d
--- /dev/null
+++ b/docs/reference/esql/functions/examples/mv_percentile.asciidoc
@@ -0,0 +1,13 @@
+// This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it.
+
+*Example*
+
+[source.merge.styled,esql]
+----
+include::{esql-specs}/mv_percentile.csv-spec[tag=example]
+----
+[%header.monospaced.styled,format=dsv,separator=|]
+|===
+include::{esql-specs}/mv_percentile.csv-spec[tag=example-result]
+|===
+
diff --git a/docs/reference/esql/functions/examples/mv_pseries_weighted_sum.asciidoc b/docs/reference/esql/functions/examples/mv_pseries_weighted_sum.asciidoc
new file mode 100644
index 0000000000000..bce4deb1f5225
--- /dev/null
+++ b/docs/reference/esql/functions/examples/mv_pseries_weighted_sum.asciidoc
@@ -0,0 +1,13 @@
+// This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it.
+
+*Example*
+
+[source.merge.styled,esql]
+----
+include::{esql-specs}/mv_pseries_weighted_sum.csv-spec[tag=example]
+----
+[%header.monospaced.styled,format=dsv,separator=|]
+|===
+include::{esql-specs}/mv_pseries_weighted_sum.csv-spec[tag=example-result]
+|===
+
diff --git a/docs/reference/esql/functions/examples/st_centroid_agg.asciidoc b/docs/reference/esql/functions/examples/st_centroid_agg.asciidoc
new file mode 100644
index 0000000000000..69c291b738828
--- /dev/null
+++ b/docs/reference/esql/functions/examples/st_centroid_agg.asciidoc
@@ -0,0 +1,13 @@
+// This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it.
+
+*Example*
+
+[source.merge.styled,esql]
+----
+include::{esql-specs}/spatial.csv-spec[tag=st_centroid_agg-airports]
+----
+[%header.monospaced.styled,format=dsv,separator=|]
+|===
+include::{esql-specs}/spatial.csv-spec[tag=st_centroid_agg-airports-result]
+|===
+
diff --git a/docs/reference/esql/functions/examples/sum.asciidoc b/docs/reference/esql/functions/examples/sum.asciidoc
new file mode 100644
index 0000000000000..1c02ccd784a54
--- /dev/null
+++ b/docs/reference/esql/functions/examples/sum.asciidoc
@@ -0,0 +1,22 @@
+// This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it.
+
+*Examples*
+
+[source.merge.styled,esql]
+----
+include::{esql-specs}/stats.csv-spec[tag=sum]
+----
+[%header.monospaced.styled,format=dsv,separator=|]
+|===
+include::{esql-specs}/stats.csv-spec[tag=sum-result]
+|===
+The expression can use inline functions. For example, to calculate the sum of each employee's maximum salary changes, apply the `MV_MAX` function to each row and then sum the results
+[source.merge.styled,esql]
+----
+include::{esql-specs}/stats.csv-spec[tag=docsStatsSumNestedExpression]
+----
+[%header.monospaced.styled,format=dsv,separator=|]
+|===
+include::{esql-specs}/stats.csv-spec[tag=docsStatsSumNestedExpression-result]
+|===
+
diff --git a/docs/reference/esql/functions/examples/values.asciidoc b/docs/reference/esql/functions/examples/values.asciidoc
new file mode 100644
index 0000000000000..c013fc39d92ca
--- /dev/null
+++ b/docs/reference/esql/functions/examples/values.asciidoc
@@ -0,0 +1,13 @@
+// This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it.
+
+*Example*
+
+[source.merge.styled,esql]
+----
+include::{esql-specs}/string.csv-spec[tag=values-grouped]
+----
+[%header.monospaced.styled,format=dsv,separator=|]
+|===
+include::{esql-specs}/string.csv-spec[tag=values-grouped-result]
+|===
+
diff --git a/docs/reference/esql/functions/examples/weighted_avg.asciidoc b/docs/reference/esql/functions/examples/weighted_avg.asciidoc
new file mode 100644
index 0000000000000..e8e8cc3eda006
--- /dev/null
+++ b/docs/reference/esql/functions/examples/weighted_avg.asciidoc
@@ -0,0 +1,13 @@
+// This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it.
+
+*Example*
+
+[source.merge.styled,esql]
+----
+include::{esql-specs}/stats.csv-spec[tag=weighted-avg]
+----
+[%header.monospaced.styled,format=dsv,separator=|]
+|===
+include::{esql-specs}/stats.csv-spec[tag=weighted-avg-result]
+|===
+
diff --git a/docs/reference/esql/functions/kibana/definition/add.json b/docs/reference/esql/functions/kibana/definition/add.json
new file mode 100644
index 0000000000000..0932a76966560
--- /dev/null
+++ b/docs/reference/esql/functions/kibana/definition/add.json
@@ -0,0 +1,296 @@
+{
+ "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.",
+ "type" : "operator",
+ "name" : "add",
+ "description" : "Add two numbers together. If either field is <> then the result is `null`.",
+ "signatures" : [
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "date",
+ "optional" : false,
+ "description" : "A numeric value or a date time value."
+ },
+ {
+ "name" : "rhs",
+ "type" : "date_period",
+ "optional" : false,
+ "description" : "A numeric value or a date time value."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "date"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "date",
+ "optional" : false,
+ "description" : "A numeric value or a date time value."
+ },
+ {
+ "name" : "rhs",
+ "type" : "time_duration",
+ "optional" : false,
+ "description" : "A numeric value or a date time value."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "date"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "date_period",
+ "optional" : false,
+ "description" : "A numeric value or a date time value."
+ },
+ {
+ "name" : "rhs",
+ "type" : "date",
+ "optional" : false,
+ "description" : "A numeric value or a date time value."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "date"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "date_period",
+ "optional" : false,
+ "description" : "A numeric value or a date time value."
+ },
+ {
+ "name" : "rhs",
+ "type" : "date_period",
+ "optional" : false,
+ "description" : "A numeric value or a date time value."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "date_period"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "double",
+ "optional" : false,
+ "description" : "A numeric value or a date time value."
+ },
+ {
+ "name" : "rhs",
+ "type" : "double",
+ "optional" : false,
+ "description" : "A numeric value or a date time value."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "double"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "double",
+ "optional" : false,
+ "description" : "A numeric value or a date time value."
+ },
+ {
+ "name" : "rhs",
+ "type" : "integer",
+ "optional" : false,
+ "description" : "A numeric value or a date time value."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "double"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "double",
+ "optional" : false,
+ "description" : "A numeric value or a date time value."
+ },
+ {
+ "name" : "rhs",
+ "type" : "long",
+ "optional" : false,
+ "description" : "A numeric value or a date time value."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "double"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "integer",
+ "optional" : false,
+ "description" : "A numeric value or a date time value."
+ },
+ {
+ "name" : "rhs",
+ "type" : "double",
+ "optional" : false,
+ "description" : "A numeric value or a date time value."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "double"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "integer",
+ "optional" : false,
+ "description" : "A numeric value or a date time value."
+ },
+ {
+ "name" : "rhs",
+ "type" : "integer",
+ "optional" : false,
+ "description" : "A numeric value or a date time value."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "integer"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "integer",
+ "optional" : false,
+ "description" : "A numeric value or a date time value."
+ },
+ {
+ "name" : "rhs",
+ "type" : "long",
+ "optional" : false,
+ "description" : "A numeric value or a date time value."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "long"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "long",
+ "optional" : false,
+ "description" : "A numeric value or a date time value."
+ },
+ {
+ "name" : "rhs",
+ "type" : "double",
+ "optional" : false,
+ "description" : "A numeric value or a date time value."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "double"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "long",
+ "optional" : false,
+ "description" : "A numeric value or a date time value."
+ },
+ {
+ "name" : "rhs",
+ "type" : "integer",
+ "optional" : false,
+ "description" : "A numeric value or a date time value."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "long"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "long",
+ "optional" : false,
+ "description" : "A numeric value or a date time value."
+ },
+ {
+ "name" : "rhs",
+ "type" : "long",
+ "optional" : false,
+ "description" : "A numeric value or a date time value."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "long"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "time_duration",
+ "optional" : false,
+ "description" : "A numeric value or a date time value."
+ },
+ {
+ "name" : "rhs",
+ "type" : "date",
+ "optional" : false,
+ "description" : "A numeric value or a date time value."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "date"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "time_duration",
+ "optional" : false,
+ "description" : "A numeric value or a date time value."
+ },
+ {
+ "name" : "rhs",
+ "type" : "time_duration",
+ "optional" : false,
+ "description" : "A numeric value or a date time value."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "time_duration"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "unsigned_long",
+ "optional" : false,
+ "description" : "A numeric value or a date time value."
+ },
+ {
+ "name" : "rhs",
+ "type" : "unsigned_long",
+ "optional" : false,
+ "description" : "A numeric value or a date time value."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "unsigned_long"
+ }
+ ]
+}
diff --git a/docs/reference/esql/functions/kibana/definition/bucket.json b/docs/reference/esql/functions/kibana/definition/bucket.json
index 7141ca4c27443..94214a3a4f047 100644
--- a/docs/reference/esql/functions/kibana/definition/bucket.json
+++ b/docs/reference/esql/functions/kibana/definition/bucket.json
@@ -8,7 +8,7 @@
"params" : [
{
"name" : "field",
- "type" : "datetime",
+ "type" : "date",
"optional" : false,
"description" : "Numeric or date expression from which to derive buckets."
},
@@ -16,17 +16,17 @@
"name" : "buckets",
"type" : "date_period",
"optional" : false,
- "description" : "Target number of buckets."
+ "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted."
}
],
"variadic" : false,
- "returnType" : "datetime"
+ "returnType" : "date"
},
{
"params" : [
{
"name" : "field",
- "type" : "datetime",
+ "type" : "date",
"optional" : false,
"description" : "Numeric or date expression from which to derive buckets."
},
@@ -34,29 +34,269 @@
"name" : "buckets",
"type" : "integer",
"optional" : false,
- "description" : "Target number of buckets."
+ "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted."
},
{
"name" : "from",
- "type" : "datetime",
+ "type" : "date",
"optional" : true,
- "description" : "Start of the range. Can be a number or a date expressed as a string."
+ "description" : "Start of the range. Can be a number, a date or a date expressed as a string."
},
{
"name" : "to",
- "type" : "datetime",
+ "type" : "date",
"optional" : true,
- "description" : "End of the range. Can be a number or a date expressed as a string."
+ "description" : "End of the range. Can be a number, a date or a date expressed as a string."
}
],
"variadic" : false,
- "returnType" : "datetime"
+ "returnType" : "date"
},
{
"params" : [
{
"name" : "field",
- "type" : "datetime",
+ "type" : "date",
+ "optional" : false,
+ "description" : "Numeric or date expression from which to derive buckets."
+ },
+ {
+ "name" : "buckets",
+ "type" : "integer",
+ "optional" : false,
+ "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted."
+ },
+ {
+ "name" : "from",
+ "type" : "date",
+ "optional" : true,
+ "description" : "Start of the range. Can be a number, a date or a date expressed as a string."
+ },
+ {
+ "name" : "to",
+ "type" : "keyword",
+ "optional" : true,
+ "description" : "End of the range. Can be a number, a date or a date expressed as a string."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "date"
+ },
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "date",
+ "optional" : false,
+ "description" : "Numeric or date expression from which to derive buckets."
+ },
+ {
+ "name" : "buckets",
+ "type" : "integer",
+ "optional" : false,
+ "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted."
+ },
+ {
+ "name" : "from",
+ "type" : "date",
+ "optional" : true,
+ "description" : "Start of the range. Can be a number, a date or a date expressed as a string."
+ },
+ {
+ "name" : "to",
+ "type" : "text",
+ "optional" : true,
+ "description" : "End of the range. Can be a number, a date or a date expressed as a string."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "date"
+ },
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "date",
+ "optional" : false,
+ "description" : "Numeric or date expression from which to derive buckets."
+ },
+ {
+ "name" : "buckets",
+ "type" : "integer",
+ "optional" : false,
+ "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted."
+ },
+ {
+ "name" : "from",
+ "type" : "keyword",
+ "optional" : true,
+ "description" : "Start of the range. Can be a number, a date or a date expressed as a string."
+ },
+ {
+ "name" : "to",
+ "type" : "date",
+ "optional" : true,
+ "description" : "End of the range. Can be a number, a date or a date expressed as a string."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "date"
+ },
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "date",
+ "optional" : false,
+ "description" : "Numeric or date expression from which to derive buckets."
+ },
+ {
+ "name" : "buckets",
+ "type" : "integer",
+ "optional" : false,
+ "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted."
+ },
+ {
+ "name" : "from",
+ "type" : "keyword",
+ "optional" : true,
+ "description" : "Start of the range. Can be a number, a date or a date expressed as a string."
+ },
+ {
+ "name" : "to",
+ "type" : "keyword",
+ "optional" : true,
+ "description" : "End of the range. Can be a number, a date or a date expressed as a string."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "date"
+ },
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "date",
+ "optional" : false,
+ "description" : "Numeric or date expression from which to derive buckets."
+ },
+ {
+ "name" : "buckets",
+ "type" : "integer",
+ "optional" : false,
+ "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted."
+ },
+ {
+ "name" : "from",
+ "type" : "keyword",
+ "optional" : true,
+ "description" : "Start of the range. Can be a number, a date or a date expressed as a string."
+ },
+ {
+ "name" : "to",
+ "type" : "text",
+ "optional" : true,
+ "description" : "End of the range. Can be a number, a date or a date expressed as a string."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "date"
+ },
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "date",
+ "optional" : false,
+ "description" : "Numeric or date expression from which to derive buckets."
+ },
+ {
+ "name" : "buckets",
+ "type" : "integer",
+ "optional" : false,
+ "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted."
+ },
+ {
+ "name" : "from",
+ "type" : "text",
+ "optional" : true,
+ "description" : "Start of the range. Can be a number, a date or a date expressed as a string."
+ },
+ {
+ "name" : "to",
+ "type" : "date",
+ "optional" : true,
+ "description" : "End of the range. Can be a number, a date or a date expressed as a string."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "date"
+ },
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "date",
+ "optional" : false,
+ "description" : "Numeric or date expression from which to derive buckets."
+ },
+ {
+ "name" : "buckets",
+ "type" : "integer",
+ "optional" : false,
+ "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted."
+ },
+ {
+ "name" : "from",
+ "type" : "text",
+ "optional" : true,
+ "description" : "Start of the range. Can be a number, a date or a date expressed as a string."
+ },
+ {
+ "name" : "to",
+ "type" : "keyword",
+ "optional" : true,
+ "description" : "End of the range. Can be a number, a date or a date expressed as a string."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "date"
+ },
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "date",
+ "optional" : false,
+ "description" : "Numeric or date expression from which to derive buckets."
+ },
+ {
+ "name" : "buckets",
+ "type" : "integer",
+ "optional" : false,
+ "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted."
+ },
+ {
+ "name" : "from",
+ "type" : "text",
+ "optional" : true,
+ "description" : "Start of the range. Can be a number, a date or a date expressed as a string."
+ },
+ {
+ "name" : "to",
+ "type" : "text",
+ "optional" : true,
+ "description" : "End of the range. Can be a number, a date or a date expressed as a string."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "date"
+ },
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "date",
"optional" : false,
"description" : "Numeric or date expression from which to derive buckets."
},
@@ -64,11 +304,11 @@
"name" : "buckets",
"type" : "time_duration",
"optional" : false,
- "description" : "Target number of buckets."
+ "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted."
}
],
"variadic" : false,
- "returnType" : "datetime"
+ "returnType" : "date"
},
{
"params" : [
@@ -82,7 +322,25 @@
"name" : "buckets",
"type" : "double",
"optional" : false,
- "description" : "Target number of buckets."
+ "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "double"
+ },
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "double",
+ "optional" : false,
+ "description" : "Numeric or date expression from which to derive buckets."
+ },
+ {
+ "name" : "buckets",
+ "type" : "integer",
+ "optional" : false,
+ "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted."
}
],
"variadic" : false,
@@ -100,19 +358,19 @@
"name" : "buckets",
"type" : "integer",
"optional" : false,
- "description" : "Target number of buckets."
+ "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted."
},
{
"name" : "from",
"type" : "double",
"optional" : true,
- "description" : "Start of the range. Can be a number or a date expressed as a string."
+ "description" : "Start of the range. Can be a number, a date or a date expressed as a string."
},
{
"name" : "to",
"type" : "double",
"optional" : true,
- "description" : "End of the range. Can be a number or a date expressed as a string."
+ "description" : "End of the range. Can be a number, a date or a date expressed as a string."
}
],
"variadic" : false,
@@ -130,19 +388,19 @@
"name" : "buckets",
"type" : "integer",
"optional" : false,
- "description" : "Target number of buckets."
+ "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted."
},
{
"name" : "from",
"type" : "double",
"optional" : true,
- "description" : "Start of the range. Can be a number or a date expressed as a string."
+ "description" : "Start of the range. Can be a number, a date or a date expressed as a string."
},
{
"name" : "to",
"type" : "integer",
"optional" : true,
- "description" : "End of the range. Can be a number or a date expressed as a string."
+ "description" : "End of the range. Can be a number, a date or a date expressed as a string."
}
],
"variadic" : false,
@@ -160,19 +418,19 @@
"name" : "buckets",
"type" : "integer",
"optional" : false,
- "description" : "Target number of buckets."
+ "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted."
},
{
"name" : "from",
"type" : "double",
"optional" : true,
- "description" : "Start of the range. Can be a number or a date expressed as a string."
+ "description" : "Start of the range. Can be a number, a date or a date expressed as a string."
},
{
"name" : "to",
"type" : "long",
"optional" : true,
- "description" : "End of the range. Can be a number or a date expressed as a string."
+ "description" : "End of the range. Can be a number, a date or a date expressed as a string."
}
],
"variadic" : false,
@@ -190,19 +448,19 @@
"name" : "buckets",
"type" : "integer",
"optional" : false,
- "description" : "Target number of buckets."
+ "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted."
},
{
"name" : "from",
"type" : "integer",
"optional" : true,
- "description" : "Start of the range. Can be a number or a date expressed as a string."
+ "description" : "Start of the range. Can be a number, a date or a date expressed as a string."
},
{
"name" : "to",
"type" : "double",
"optional" : true,
- "description" : "End of the range. Can be a number or a date expressed as a string."
+ "description" : "End of the range. Can be a number, a date or a date expressed as a string."
}
],
"variadic" : false,
@@ -220,19 +478,19 @@
"name" : "buckets",
"type" : "integer",
"optional" : false,
- "description" : "Target number of buckets."
+ "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted."
},
{
"name" : "from",
"type" : "integer",
"optional" : true,
- "description" : "Start of the range. Can be a number or a date expressed as a string."
+ "description" : "Start of the range. Can be a number, a date or a date expressed as a string."
},
{
"name" : "to",
"type" : "integer",
"optional" : true,
- "description" : "End of the range. Can be a number or a date expressed as a string."
+ "description" : "End of the range. Can be a number, a date or a date expressed as a string."
}
],
"variadic" : false,
@@ -250,19 +508,19 @@
"name" : "buckets",
"type" : "integer",
"optional" : false,
- "description" : "Target number of buckets."
+ "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted."
},
{
"name" : "from",
"type" : "integer",
"optional" : true,
- "description" : "Start of the range. Can be a number or a date expressed as a string."
+ "description" : "Start of the range. Can be a number, a date or a date expressed as a string."
},
{
"name" : "to",
"type" : "long",
"optional" : true,
- "description" : "End of the range. Can be a number or a date expressed as a string."
+ "description" : "End of the range. Can be a number, a date or a date expressed as a string."
}
],
"variadic" : false,
@@ -280,19 +538,19 @@
"name" : "buckets",
"type" : "integer",
"optional" : false,
- "description" : "Target number of buckets."
+ "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted."
},
{
"name" : "from",
"type" : "long",
"optional" : true,
- "description" : "Start of the range. Can be a number or a date expressed as a string."
+ "description" : "Start of the range. Can be a number, a date or a date expressed as a string."
},
{
"name" : "to",
"type" : "double",
"optional" : true,
- "description" : "End of the range. Can be a number or a date expressed as a string."
+ "description" : "End of the range. Can be a number, a date or a date expressed as a string."
}
],
"variadic" : false,
@@ -310,19 +568,19 @@
"name" : "buckets",
"type" : "integer",
"optional" : false,
- "description" : "Target number of buckets."
+ "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted."
},
{
"name" : "from",
"type" : "long",
"optional" : true,
- "description" : "Start of the range. Can be a number or a date expressed as a string."
+ "description" : "Start of the range. Can be a number, a date or a date expressed as a string."
},
{
"name" : "to",
"type" : "integer",
"optional" : true,
- "description" : "End of the range. Can be a number or a date expressed as a string."
+ "description" : "End of the range. Can be a number, a date or a date expressed as a string."
}
],
"variadic" : false,
@@ -340,19 +598,37 @@
"name" : "buckets",
"type" : "integer",
"optional" : false,
- "description" : "Target number of buckets."
+ "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted."
},
{
"name" : "from",
"type" : "long",
"optional" : true,
- "description" : "Start of the range. Can be a number or a date expressed as a string."
+ "description" : "Start of the range. Can be a number, a date or a date expressed as a string."
},
{
"name" : "to",
"type" : "long",
"optional" : true,
- "description" : "End of the range. Can be a number or a date expressed as a string."
+ "description" : "End of the range. Can be a number, a date or a date expressed as a string."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "double"
+ },
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "double",
+ "optional" : false,
+ "description" : "Numeric or date expression from which to derive buckets."
+ },
+ {
+ "name" : "buckets",
+ "type" : "long",
+ "optional" : false,
+ "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted."
}
],
"variadic" : false,
@@ -370,7 +646,25 @@
"name" : "buckets",
"type" : "double",
"optional" : false,
- "description" : "Target number of buckets."
+ "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "double"
+ },
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "integer",
+ "optional" : false,
+ "description" : "Numeric or date expression from which to derive buckets."
+ },
+ {
+ "name" : "buckets",
+ "type" : "integer",
+ "optional" : false,
+ "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted."
}
],
"variadic" : false,
@@ -388,19 +682,19 @@
"name" : "buckets",
"type" : "integer",
"optional" : false,
- "description" : "Target number of buckets."
+ "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted."
},
{
"name" : "from",
"type" : "double",
"optional" : true,
- "description" : "Start of the range. Can be a number or a date expressed as a string."
+ "description" : "Start of the range. Can be a number, a date or a date expressed as a string."
},
{
"name" : "to",
"type" : "double",
"optional" : true,
- "description" : "End of the range. Can be a number or a date expressed as a string."
+ "description" : "End of the range. Can be a number, a date or a date expressed as a string."
}
],
"variadic" : false,
@@ -418,19 +712,19 @@
"name" : "buckets",
"type" : "integer",
"optional" : false,
- "description" : "Target number of buckets."
+ "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted."
},
{
"name" : "from",
"type" : "double",
"optional" : true,
- "description" : "Start of the range. Can be a number or a date expressed as a string."
+ "description" : "Start of the range. Can be a number, a date or a date expressed as a string."
},
{
"name" : "to",
"type" : "integer",
"optional" : true,
- "description" : "End of the range. Can be a number or a date expressed as a string."
+ "description" : "End of the range. Can be a number, a date or a date expressed as a string."
}
],
"variadic" : false,
@@ -448,19 +742,19 @@
"name" : "buckets",
"type" : "integer",
"optional" : false,
- "description" : "Target number of buckets."
+ "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted."
},
{
"name" : "from",
"type" : "double",
"optional" : true,
- "description" : "Start of the range. Can be a number or a date expressed as a string."
+ "description" : "Start of the range. Can be a number, a date or a date expressed as a string."
},
{
"name" : "to",
"type" : "long",
"optional" : true,
- "description" : "End of the range. Can be a number or a date expressed as a string."
+ "description" : "End of the range. Can be a number, a date or a date expressed as a string."
}
],
"variadic" : false,
@@ -478,19 +772,19 @@
"name" : "buckets",
"type" : "integer",
"optional" : false,
- "description" : "Target number of buckets."
+ "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted."
},
{
"name" : "from",
"type" : "integer",
"optional" : true,
- "description" : "Start of the range. Can be a number or a date expressed as a string."
+ "description" : "Start of the range. Can be a number, a date or a date expressed as a string."
},
{
"name" : "to",
"type" : "double",
"optional" : true,
- "description" : "End of the range. Can be a number or a date expressed as a string."
+ "description" : "End of the range. Can be a number, a date or a date expressed as a string."
}
],
"variadic" : false,
@@ -508,19 +802,19 @@
"name" : "buckets",
"type" : "integer",
"optional" : false,
- "description" : "Target number of buckets."
+ "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted."
},
{
"name" : "from",
"type" : "integer",
"optional" : true,
- "description" : "Start of the range. Can be a number or a date expressed as a string."
+ "description" : "Start of the range. Can be a number, a date or a date expressed as a string."
},
{
"name" : "to",
"type" : "integer",
"optional" : true,
- "description" : "End of the range. Can be a number or a date expressed as a string."
+ "description" : "End of the range. Can be a number, a date or a date expressed as a string."
}
],
"variadic" : false,
@@ -538,19 +832,19 @@
"name" : "buckets",
"type" : "integer",
"optional" : false,
- "description" : "Target number of buckets."
+ "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted."
},
{
"name" : "from",
"type" : "integer",
"optional" : true,
- "description" : "Start of the range. Can be a number or a date expressed as a string."
+ "description" : "Start of the range. Can be a number, a date or a date expressed as a string."
},
{
"name" : "to",
"type" : "long",
"optional" : true,
- "description" : "End of the range. Can be a number or a date expressed as a string."
+ "description" : "End of the range. Can be a number, a date or a date expressed as a string."
}
],
"variadic" : false,
@@ -568,19 +862,19 @@
"name" : "buckets",
"type" : "integer",
"optional" : false,
- "description" : "Target number of buckets."
+ "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted."
},
{
"name" : "from",
"type" : "long",
"optional" : true,
- "description" : "Start of the range. Can be a number or a date expressed as a string."
+ "description" : "Start of the range. Can be a number, a date or a date expressed as a string."
},
{
"name" : "to",
"type" : "double",
"optional" : true,
- "description" : "End of the range. Can be a number or a date expressed as a string."
+ "description" : "End of the range. Can be a number, a date or a date expressed as a string."
}
],
"variadic" : false,
@@ -598,19 +892,19 @@
"name" : "buckets",
"type" : "integer",
"optional" : false,
- "description" : "Target number of buckets."
+ "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted."
},
{
"name" : "from",
"type" : "long",
"optional" : true,
- "description" : "Start of the range. Can be a number or a date expressed as a string."
+ "description" : "Start of the range. Can be a number, a date or a date expressed as a string."
},
{
"name" : "to",
"type" : "integer",
"optional" : true,
- "description" : "End of the range. Can be a number or a date expressed as a string."
+ "description" : "End of the range. Can be a number, a date or a date expressed as a string."
}
],
"variadic" : false,
@@ -628,19 +922,37 @@
"name" : "buckets",
"type" : "integer",
"optional" : false,
- "description" : "Target number of buckets."
+ "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted."
},
{
"name" : "from",
"type" : "long",
"optional" : true,
- "description" : "Start of the range. Can be a number or a date expressed as a string."
+ "description" : "Start of the range. Can be a number, a date or a date expressed as a string."
},
{
"name" : "to",
"type" : "long",
"optional" : true,
- "description" : "End of the range. Can be a number or a date expressed as a string."
+ "description" : "End of the range. Can be a number, a date or a date expressed as a string."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "double"
+ },
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "integer",
+ "optional" : false,
+ "description" : "Numeric or date expression from which to derive buckets."
+ },
+ {
+ "name" : "buckets",
+ "type" : "long",
+ "optional" : false,
+ "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted."
}
],
"variadic" : false,
@@ -658,7 +970,25 @@
"name" : "buckets",
"type" : "double",
"optional" : false,
- "description" : "Target number of buckets."
+ "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "double"
+ },
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "long",
+ "optional" : false,
+ "description" : "Numeric or date expression from which to derive buckets."
+ },
+ {
+ "name" : "buckets",
+ "type" : "integer",
+ "optional" : false,
+ "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted."
}
],
"variadic" : false,
@@ -676,19 +1006,19 @@
"name" : "buckets",
"type" : "integer",
"optional" : false,
- "description" : "Target number of buckets."
+ "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted."
},
{
"name" : "from",
"type" : "double",
"optional" : true,
- "description" : "Start of the range. Can be a number or a date expressed as a string."
+ "description" : "Start of the range. Can be a number, a date or a date expressed as a string."
},
{
"name" : "to",
"type" : "double",
"optional" : true,
- "description" : "End of the range. Can be a number or a date expressed as a string."
+ "description" : "End of the range. Can be a number, a date or a date expressed as a string."
}
],
"variadic" : false,
@@ -706,19 +1036,19 @@
"name" : "buckets",
"type" : "integer",
"optional" : false,
- "description" : "Target number of buckets."
+ "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted."
},
{
"name" : "from",
"type" : "double",
"optional" : true,
- "description" : "Start of the range. Can be a number or a date expressed as a string."
+ "description" : "Start of the range. Can be a number, a date or a date expressed as a string."
},
{
"name" : "to",
"type" : "integer",
"optional" : true,
- "description" : "End of the range. Can be a number or a date expressed as a string."
+ "description" : "End of the range. Can be a number, a date or a date expressed as a string."
}
],
"variadic" : false,
@@ -736,19 +1066,19 @@
"name" : "buckets",
"type" : "integer",
"optional" : false,
- "description" : "Target number of buckets."
+ "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted."
},
{
"name" : "from",
"type" : "double",
"optional" : true,
- "description" : "Start of the range. Can be a number or a date expressed as a string."
+ "description" : "Start of the range. Can be a number, a date or a date expressed as a string."
},
{
"name" : "to",
"type" : "long",
"optional" : true,
- "description" : "End of the range. Can be a number or a date expressed as a string."
+ "description" : "End of the range. Can be a number, a date or a date expressed as a string."
}
],
"variadic" : false,
@@ -766,19 +1096,19 @@
"name" : "buckets",
"type" : "integer",
"optional" : false,
- "description" : "Target number of buckets."
+ "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted."
},
{
"name" : "from",
"type" : "integer",
"optional" : true,
- "description" : "Start of the range. Can be a number or a date expressed as a string."
+ "description" : "Start of the range. Can be a number, a date or a date expressed as a string."
},
{
"name" : "to",
"type" : "double",
"optional" : true,
- "description" : "End of the range. Can be a number or a date expressed as a string."
+ "description" : "End of the range. Can be a number, a date or a date expressed as a string."
}
],
"variadic" : false,
@@ -796,19 +1126,19 @@
"name" : "buckets",
"type" : "integer",
"optional" : false,
- "description" : "Target number of buckets."
+ "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted."
},
{
"name" : "from",
"type" : "integer",
"optional" : true,
- "description" : "Start of the range. Can be a number or a date expressed as a string."
+ "description" : "Start of the range. Can be a number, a date or a date expressed as a string."
},
{
"name" : "to",
"type" : "integer",
"optional" : true,
- "description" : "End of the range. Can be a number or a date expressed as a string."
+ "description" : "End of the range. Can be a number, a date or a date expressed as a string."
}
],
"variadic" : false,
@@ -826,19 +1156,19 @@
"name" : "buckets",
"type" : "integer",
"optional" : false,
- "description" : "Target number of buckets."
+ "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted."
},
{
"name" : "from",
"type" : "integer",
"optional" : true,
- "description" : "Start of the range. Can be a number or a date expressed as a string."
+ "description" : "Start of the range. Can be a number, a date or a date expressed as a string."
},
{
"name" : "to",
"type" : "long",
"optional" : true,
- "description" : "End of the range. Can be a number or a date expressed as a string."
+ "description" : "End of the range. Can be a number, a date or a date expressed as a string."
}
],
"variadic" : false,
@@ -856,19 +1186,19 @@
"name" : "buckets",
"type" : "integer",
"optional" : false,
- "description" : "Target number of buckets."
+ "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted."
},
{
"name" : "from",
"type" : "long",
"optional" : true,
- "description" : "Start of the range. Can be a number or a date expressed as a string."
+ "description" : "Start of the range. Can be a number, a date or a date expressed as a string."
},
{
"name" : "to",
"type" : "double",
"optional" : true,
- "description" : "End of the range. Can be a number or a date expressed as a string."
+ "description" : "End of the range. Can be a number, a date or a date expressed as a string."
}
],
"variadic" : false,
@@ -886,19 +1216,19 @@
"name" : "buckets",
"type" : "integer",
"optional" : false,
- "description" : "Target number of buckets."
+ "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted."
},
{
"name" : "from",
"type" : "long",
"optional" : true,
- "description" : "Start of the range. Can be a number or a date expressed as a string."
+ "description" : "Start of the range. Can be a number, a date or a date expressed as a string."
},
{
"name" : "to",
"type" : "integer",
"optional" : true,
- "description" : "End of the range. Can be a number or a date expressed as a string."
+ "description" : "End of the range. Can be a number, a date or a date expressed as a string."
}
],
"variadic" : false,
@@ -916,19 +1246,37 @@
"name" : "buckets",
"type" : "integer",
"optional" : false,
- "description" : "Target number of buckets."
+ "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted."
},
{
"name" : "from",
"type" : "long",
"optional" : true,
- "description" : "Start of the range. Can be a number or a date expressed as a string."
+ "description" : "Start of the range. Can be a number, a date or a date expressed as a string."
},
{
"name" : "to",
"type" : "long",
"optional" : true,
- "description" : "End of the range. Can be a number or a date expressed as a string."
+ "description" : "End of the range. Can be a number, a date or a date expressed as a string."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "double"
+ },
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "long",
+ "optional" : false,
+ "description" : "Numeric or date expression from which to derive buckets."
+ },
+ {
+ "name" : "buckets",
+ "type" : "long",
+ "optional" : false,
+ "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted."
}
],
"variadic" : false,
diff --git a/docs/reference/esql/functions/kibana/definition/case.json b/docs/reference/esql/functions/kibana/definition/case.json
index 5959eed62d37b..27705cd3897f9 100644
--- a/docs/reference/esql/functions/kibana/definition/case.json
+++ b/docs/reference/esql/functions/kibana/definition/case.json
@@ -50,13 +50,13 @@
},
{
"name" : "trueValue",
- "type" : "datetime",
+ "type" : "date",
"optional" : false,
"description" : "The value that's returned when the corresponding condition is the first to evaluate to `true`. The default value is returned when no condition matches."
}
],
"variadic" : true,
- "returnType" : "datetime"
+ "returnType" : "date"
},
{
"params" : [
diff --git a/docs/reference/esql/functions/kibana/definition/coalesce.json b/docs/reference/esql/functions/kibana/definition/coalesce.json
index f00f471e63ecc..2459a4d51bb2d 100644
--- a/docs/reference/esql/functions/kibana/definition/coalesce.json
+++ b/docs/reference/esql/functions/kibana/definition/coalesce.json
@@ -74,19 +74,19 @@
"params" : [
{
"name" : "first",
- "type" : "datetime",
+ "type" : "date",
"optional" : false,
"description" : "Expression to evaluate."
},
{
"name" : "rest",
- "type" : "datetime",
+ "type" : "date",
"optional" : true,
"description" : "Other expression to evaluate."
}
],
"variadic" : true,
- "returnType" : "datetime"
+ "returnType" : "date"
},
{
"params" : [
diff --git a/docs/reference/esql/functions/kibana/definition/count.json b/docs/reference/esql/functions/kibana/definition/count.json
new file mode 100644
index 0000000000000..2a15fb3bdd335
--- /dev/null
+++ b/docs/reference/esql/functions/kibana/definition/count.json
@@ -0,0 +1,159 @@
+{
+ "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.",
+ "type" : "agg",
+ "name" : "count",
+ "description" : "Returns the total number (count) of input values.",
+ "signatures" : [
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "boolean",
+ "optional" : true,
+ "description" : "Expression that outputs values to be counted. If omitted, equivalent to `COUNT(*)` (the number of rows)."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "long"
+ },
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "cartesian_point",
+ "optional" : true,
+ "description" : "Expression that outputs values to be counted. If omitted, equivalent to `COUNT(*)` (the number of rows)."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "long"
+ },
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "date",
+ "optional" : true,
+ "description" : "Expression that outputs values to be counted. If omitted, equivalent to `COUNT(*)` (the number of rows)."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "long"
+ },
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "double",
+ "optional" : true,
+ "description" : "Expression that outputs values to be counted. If omitted, equivalent to `COUNT(*)` (the number of rows)."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "long"
+ },
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "geo_point",
+ "optional" : true,
+ "description" : "Expression that outputs values to be counted. If omitted, equivalent to `COUNT(*)` (the number of rows)."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "long"
+ },
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "integer",
+ "optional" : true,
+ "description" : "Expression that outputs values to be counted. If omitted, equivalent to `COUNT(*)` (the number of rows)."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "long"
+ },
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "ip",
+ "optional" : true,
+ "description" : "Expression that outputs values to be counted. If omitted, equivalent to `COUNT(*)` (the number of rows)."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "long"
+ },
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "keyword",
+ "optional" : true,
+ "description" : "Expression that outputs values to be counted. If omitted, equivalent to `COUNT(*)` (the number of rows)."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "long"
+ },
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "long",
+ "optional" : true,
+ "description" : "Expression that outputs values to be counted. If omitted, equivalent to `COUNT(*)` (the number of rows)."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "long"
+ },
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "text",
+ "optional" : true,
+ "description" : "Expression that outputs values to be counted. If omitted, equivalent to `COUNT(*)` (the number of rows)."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "long"
+ },
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "unsigned_long",
+ "optional" : true,
+ "description" : "Expression that outputs values to be counted. If omitted, equivalent to `COUNT(*)` (the number of rows)."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "long"
+ },
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "version",
+ "optional" : true,
+ "description" : "Expression that outputs values to be counted. If omitted, equivalent to `COUNT(*)` (the number of rows)."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "long"
+ }
+ ],
+ "examples" : [
+ "FROM employees\n| STATS COUNT(height)",
+ "FROM employees \n| STATS count = COUNT(*) BY languages \n| SORT languages DESC",
+ "ROW words=\"foo;bar;baz;qux;quux;foo\"\n| STATS word_count = COUNT(SPLIT(words, \";\"))",
+ "ROW n=1\n| WHERE n < 0\n| STATS COUNT(n)",
+ "ROW n=1\n| STATS COUNT(n > 0 OR NULL), COUNT(n < 0 OR NULL)"
+ ]
+}
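The last example above uses an idiom worth spelling out: `condition OR NULL` evaluates to `true` or `null`, and COUNT ignores nulls, so the call counts only the rows matching the condition. A minimal sketch:

```esql
ROW n = 1
| STATS above = COUNT(n > 0 OR NULL),   // counts rows where n > 0
        below = COUNT(n < 0 OR NULL)    // counts rows where n < 0
```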
diff --git a/docs/reference/esql/functions/kibana/definition/count_distinct.json b/docs/reference/esql/functions/kibana/definition/count_distinct.json
new file mode 100644
index 0000000000000..f6a148783ba42
--- /dev/null
+++ b/docs/reference/esql/functions/kibana/definition/count_distinct.json
@@ -0,0 +1,607 @@
+{
+ "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.",
+ "type" : "agg",
+ "name" : "count_distinct",
+ "description" : "Returns the approximate number of distinct values.",
+ "signatures" : [
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "boolean",
+ "optional" : false,
+ "description" : "Column or literal for which to count the number of distinct values."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "long"
+ },
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "boolean",
+ "optional" : false,
+ "description" : "Column or literal for which to count the number of distinct values."
+ },
+ {
+ "name" : "precision",
+ "type" : "integer",
+ "optional" : true,
+ "description" : "Precision threshold. Refer to <>. The maximum supported value is 40000. Thresholds above this number will have the same effect as a threshold of 40000. The default value is 3000."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "long"
+ },
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "boolean",
+ "optional" : false,
+ "description" : "Column or literal for which to count the number of distinct values."
+ },
+ {
+ "name" : "precision",
+ "type" : "long",
+ "optional" : true,
+ "description" : "Precision threshold. Refer to <>. The maximum supported value is 40000. Thresholds above this number will have the same effect as a threshold of 40000. The default value is 3000."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "long"
+ },
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "boolean",
+ "optional" : false,
+ "description" : "Column or literal for which to count the number of distinct values."
+ },
+ {
+ "name" : "precision",
+ "type" : "unsigned_long",
+ "optional" : true,
+ "description" : "Precision threshold. Refer to <>. The maximum supported value is 40000. Thresholds above this number will have the same effect as a threshold of 40000. The default value is 3000."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "long"
+ },
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "date",
+ "optional" : false,
+ "description" : "Column or literal for which to count the number of distinct values."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "long"
+ },
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "date",
+ "optional" : false,
+ "description" : "Column or literal for which to count the number of distinct values."
+ },
+ {
+ "name" : "precision",
+ "type" : "integer",
+ "optional" : true,
+ "description" : "Precision threshold. Refer to <>. The maximum supported value is 40000. Thresholds above this number will have the same effect as a threshold of 40000. The default value is 3000."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "long"
+ },
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "date",
+ "optional" : false,
+ "description" : "Column or literal for which to count the number of distinct values."
+ },
+ {
+ "name" : "precision",
+ "type" : "long",
+ "optional" : true,
+ "description" : "Precision threshold. Refer to <>. The maximum supported value is 40000. Thresholds above this number will have the same effect as a threshold of 40000. The default value is 3000."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "long"
+ },
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "date",
+ "optional" : false,
+ "description" : "Column or literal for which to count the number of distinct values."
+ },
+ {
+ "name" : "precision",
+ "type" : "unsigned_long",
+ "optional" : true,
+ "description" : "Precision threshold. Refer to <>. The maximum supported value is 40000. Thresholds above this number will have the same effect as a threshold of 40000. The default value is 3000."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "long"
+ },
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "double",
+ "optional" : false,
+ "description" : "Column or literal for which to count the number of distinct values."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "long"
+ },
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "double",
+ "optional" : false,
+ "description" : "Column or literal for which to count the number of distinct values."
+ },
+ {
+ "name" : "precision",
+ "type" : "integer",
+ "optional" : true,
+ "description" : "Precision threshold. Refer to <>. The maximum supported value is 40000. Thresholds above this number will have the same effect as a threshold of 40000. The default value is 3000."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "long"
+ },
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "double",
+ "optional" : false,
+ "description" : "Column or literal for which to count the number of distinct values."
+ },
+ {
+ "name" : "precision",
+ "type" : "long",
+ "optional" : true,
+ "description" : "Precision threshold. Refer to <>. The maximum supported value is 40000. Thresholds above this number will have the same effect as a threshold of 40000. The default value is 3000."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "long"
+ },
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "double",
+ "optional" : false,
+ "description" : "Column or literal for which to count the number of distinct values."
+ },
+ {
+ "name" : "precision",
+ "type" : "unsigned_long",
+ "optional" : true,
+ "description" : "Precision threshold. Refer to <>. The maximum supported value is 40000. Thresholds above this number will have the same effect as a threshold of 40000. The default value is 3000."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "long"
+ },
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "integer",
+ "optional" : false,
+ "description" : "Column or literal for which to count the number of distinct values."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "long"
+ },
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "integer",
+ "optional" : false,
+ "description" : "Column or literal for which to count the number of distinct values."
+ },
+ {
+ "name" : "precision",
+ "type" : "integer",
+ "optional" : true,
+ "description" : "Precision threshold. Refer to <>. The maximum supported value is 40000. Thresholds above this number will have the same effect as a threshold of 40000. The default value is 3000."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "long"
+ },
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "integer",
+ "optional" : false,
+ "description" : "Column or literal for which to count the number of distinct values."
+ },
+ {
+ "name" : "precision",
+ "type" : "long",
+ "optional" : true,
+ "description" : "Precision threshold. Refer to <>. The maximum supported value is 40000. Thresholds above this number will have the same effect as a threshold of 40000. The default value is 3000."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "long"
+ },
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "integer",
+ "optional" : false,
+ "description" : "Column or literal for which to count the number of distinct values."
+ },
+ {
+ "name" : "precision",
+ "type" : "unsigned_long",
+ "optional" : true,
+ "description" : "Precision threshold. Refer to <>. The maximum supported value is 40000. Thresholds above this number will have the same effect as a threshold of 40000. The default value is 3000."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "long"
+ },
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "ip",
+ "optional" : false,
+ "description" : "Column or literal for which to count the number of distinct values."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "long"
+ },
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "ip",
+ "optional" : false,
+ "description" : "Column or literal for which to count the number of distinct values."
+ },
+ {
+ "name" : "precision",
+ "type" : "integer",
+ "optional" : true,
+ "description" : "Precision threshold. Refer to <>. The maximum supported value is 40000. Thresholds above this number will have the same effect as a threshold of 40000. The default value is 3000."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "long"
+ },
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "ip",
+ "optional" : false,
+ "description" : "Column or literal for which to count the number of distinct values."
+ },
+ {
+ "name" : "precision",
+ "type" : "long",
+ "optional" : true,
+ "description" : "Precision threshold. Refer to <>. The maximum supported value is 40000. Thresholds above this number will have the same effect as a threshold of 40000. The default value is 3000."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "long"
+ },
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "ip",
+ "optional" : false,
+ "description" : "Column or literal for which to count the number of distinct values."
+ },
+ {
+ "name" : "precision",
+ "type" : "unsigned_long",
+ "optional" : true,
+ "description" : "Precision threshold. Refer to <>. The maximum supported value is 40000. Thresholds above this number will have the same effect as a threshold of 40000. The default value is 3000."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "long"
+ },
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "keyword",
+ "optional" : false,
+ "description" : "Column or literal for which to count the number of distinct values."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "long"
+ },
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "keyword",
+ "optional" : false,
+ "description" : "Column or literal for which to count the number of distinct values."
+ },
+ {
+ "name" : "precision",
+ "type" : "integer",
+ "optional" : true,
+ "description" : "Precision threshold. Refer to <>. The maximum supported value is 40000. Thresholds above this number will have the same effect as a threshold of 40000. The default value is 3000."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "long"
+ },
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "keyword",
+ "optional" : false,
+ "description" : "Column or literal for which to count the number of distinct values."
+ },
+ {
+ "name" : "precision",
+ "type" : "long",
+ "optional" : true,
+ "description" : "Precision threshold. Refer to <>. The maximum supported value is 40000. Thresholds above this number will have the same effect as a threshold of 40000. The default value is 3000."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "long"
+ },
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "keyword",
+ "optional" : false,
+ "description" : "Column or literal for which to count the number of distinct values."
+ },
+ {
+ "name" : "precision",
+ "type" : "unsigned_long",
+ "optional" : true,
+ "description" : "Precision threshold. Refer to <>. The maximum supported value is 40000. Thresholds above this number will have the same effect as a threshold of 40000. The default value is 3000."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "long"
+ },
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "long",
+ "optional" : false,
+ "description" : "Column or literal for which to count the number of distinct values."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "long"
+ },
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "long",
+ "optional" : false,
+ "description" : "Column or literal for which to count the number of distinct values."
+ },
+ {
+ "name" : "precision",
+ "type" : "integer",
+ "optional" : true,
+ "description" : "Precision threshold. Refer to <>. The maximum supported value is 40000. Thresholds above this number will have the same effect as a threshold of 40000. The default value is 3000."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "long"
+ },
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "long",
+ "optional" : false,
+ "description" : "Column or literal for which to count the number of distinct values."
+ },
+ {
+ "name" : "precision",
+ "type" : "long",
+ "optional" : true,
+ "description" : "Precision threshold. Refer to <>. The maximum supported value is 40000. Thresholds above this number will have the same effect as a threshold of 40000. The default value is 3000."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "long"
+ },
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "long",
+ "optional" : false,
+ "description" : "Column or literal for which to count the number of distinct values."
+ },
+ {
+ "name" : "precision",
+ "type" : "unsigned_long",
+ "optional" : true,
+ "description" : "Precision threshold. Refer to <>. The maximum supported value is 40000. Thresholds above this number will have the same effect as a threshold of 40000. The default value is 3000."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "long"
+ },
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "text",
+ "optional" : false,
+ "description" : "Column or literal for which to count the number of distinct values."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "long"
+ },
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "text",
+ "optional" : false,
+ "description" : "Column or literal for which to count the number of distinct values."
+ },
+ {
+ "name" : "precision",
+ "type" : "integer",
+ "optional" : true,
+ "description" : "Precision threshold. Refer to <>. The maximum supported value is 40000. Thresholds above this number will have the same effect as a threshold of 40000. The default value is 3000."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "long"
+ },
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "text",
+ "optional" : false,
+ "description" : "Column or literal for which to count the number of distinct values."
+ },
+ {
+ "name" : "precision",
+ "type" : "long",
+ "optional" : true,
+ "description" : "Precision threshold. Refer to <>. The maximum supported value is 40000. Thresholds above this number will have the same effect as a threshold of 40000. The default value is 3000."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "long"
+ },
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "text",
+ "optional" : false,
+ "description" : "Column or literal for which to count the number of distinct values."
+ },
+ {
+ "name" : "precision",
+ "type" : "unsigned_long",
+ "optional" : true,
+ "description" : "Precision threshold. Refer to <>. The maximum supported value is 40000. Thresholds above this number will have the same effect as a threshold of 40000. The default value is 3000."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "long"
+ },
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "version",
+ "optional" : false,
+ "description" : "Column or literal for which to count the number of distinct values."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "long"
+ },
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "version",
+ "optional" : false,
+ "description" : "Column or literal for which to count the number of distinct values."
+ },
+ {
+ "name" : "precision",
+ "type" : "integer",
+ "optional" : true,
+ "description" : "Precision threshold. Refer to <>. The maximum supported value is 40000. Thresholds above this number will have the same effect as a threshold of 40000. The default value is 3000."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "long"
+ },
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "version",
+ "optional" : false,
+ "description" : "Column or literal for which to count the number of distinct values."
+ },
+ {
+ "name" : "precision",
+ "type" : "long",
+ "optional" : true,
+ "description" : "Precision threshold. Refer to <>. The maximum supported value is 40000. Thresholds above this number will have the same effect as a threshold of 40000. The default value is 3000."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "long"
+ },
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "version",
+ "optional" : false,
+ "description" : "Column or literal for which to count the number of distinct values."
+ },
+ {
+ "name" : "precision",
+ "type" : "unsigned_long",
+ "optional" : true,
+ "description" : "Precision threshold. Refer to <>. The maximum supported value is 40000. Thresholds above this number will have the same effect as a threshold of 40000. The default value is 3000."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "long"
+ }
+ ],
+ "examples" : [
+ "FROM hosts\n| STATS COUNT_DISTINCT(ip0), COUNT_DISTINCT(ip1)",
+ "FROM hosts\n| STATS COUNT_DISTINCT(ip0, 80000), COUNT_DISTINCT(ip1, 5)",
+ "ROW words=\"foo;bar;baz;qux;quux;foo\"\n| STATS distinct_word_count = COUNT_DISTINCT(SPLIT(words, \";\"))"
+ ]
+}
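COUNT_DISTINCT is approximate (it is based on the HyperLogLog++ algorithm), and the optional `precision` argument above trades memory for accuracy: counts stay effectively exact up to the threshold. A minimal sketch, reusing the `hosts` index and `ip0`/`ip1` fields from the examples:

```esql
FROM hosts
| STATS high = COUNT_DISTINCT(ip0, 80000),  // capped at 40000: near-exact, more memory
        low  = COUNT_DISTINCT(ip1, 5)       // coarse and cheap
```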
diff --git a/docs/reference/esql/functions/kibana/definition/date_diff.json b/docs/reference/esql/functions/kibana/definition/date_diff.json
index 7995d3c6d32b6..d6589f041075d 100644
--- a/docs/reference/esql/functions/kibana/definition/date_diff.json
+++ b/docs/reference/esql/functions/kibana/definition/date_diff.json
@@ -14,13 +14,13 @@
},
{
"name" : "startTimestamp",
- "type" : "datetime",
+ "type" : "date",
"optional" : false,
"description" : "A string representing a start timestamp"
},
{
"name" : "endTimestamp",
- "type" : "datetime",
+ "type" : "date",
"optional" : false,
"description" : "A string representing an end timestamp"
}
@@ -38,13 +38,13 @@
},
{
"name" : "startTimestamp",
- "type" : "datetime",
+ "type" : "date",
"optional" : false,
"description" : "A string representing a start timestamp"
},
{
"name" : "endTimestamp",
- "type" : "datetime",
+ "type" : "date",
"optional" : false,
"description" : "A string representing an end timestamp"
}
diff --git a/docs/reference/esql/functions/kibana/definition/date_extract.json b/docs/reference/esql/functions/kibana/definition/date_extract.json
index 75cedcc191b50..557f0e0a47e54 100644
--- a/docs/reference/esql/functions/kibana/definition/date_extract.json
+++ b/docs/reference/esql/functions/kibana/definition/date_extract.json
@@ -14,7 +14,7 @@
},
{
"name" : "date",
- "type" : "datetime",
+ "type" : "date",
"optional" : false,
"description" : "Date expression. If `null`, the function returns `null`."
}
@@ -32,7 +32,7 @@
},
{
"name" : "date",
- "type" : "datetime",
+ "type" : "date",
"optional" : false,
"description" : "Date expression. If `null`, the function returns `null`."
}
diff --git a/docs/reference/esql/functions/kibana/definition/date_format.json b/docs/reference/esql/functions/kibana/definition/date_format.json
index 5e8587c046d70..7bd01d7f4ef31 100644
--- a/docs/reference/esql/functions/kibana/definition/date_format.json
+++ b/docs/reference/esql/functions/kibana/definition/date_format.json
@@ -14,7 +14,7 @@
},
{
"name" : "date",
- "type" : "datetime",
+ "type" : "date",
"optional" : false,
"description" : "Date expression. If `null`, the function returns `null`."
}
@@ -32,7 +32,7 @@
},
{
"name" : "date",
- "type" : "datetime",
+ "type" : "date",
"optional" : false,
"description" : "Date expression. If `null`, the function returns `null`."
}
diff --git a/docs/reference/esql/functions/kibana/definition/date_parse.json b/docs/reference/esql/functions/kibana/definition/date_parse.json
index 890179143bef8..9400340750c2a 100644
--- a/docs/reference/esql/functions/kibana/definition/date_parse.json
+++ b/docs/reference/esql/functions/kibana/definition/date_parse.json
@@ -20,7 +20,7 @@
}
],
"variadic" : false,
- "returnType" : "datetime"
+ "returnType" : "date"
},
{
"params" : [
@@ -38,7 +38,7 @@
}
],
"variadic" : false,
- "returnType" : "datetime"
+ "returnType" : "date"
},
{
"params" : [
@@ -56,7 +56,7 @@
}
],
"variadic" : false,
- "returnType" : "datetime"
+ "returnType" : "date"
},
{
"params" : [
@@ -74,7 +74,7 @@
}
],
"variadic" : false,
- "returnType" : "datetime"
+ "returnType" : "date"
}
],
"examples" : [
diff --git a/docs/reference/esql/functions/kibana/definition/date_trunc.json b/docs/reference/esql/functions/kibana/definition/date_trunc.json
index 3d8658c496529..bd3f362d1670b 100644
--- a/docs/reference/esql/functions/kibana/definition/date_trunc.json
+++ b/docs/reference/esql/functions/kibana/definition/date_trunc.json
@@ -14,13 +14,13 @@
},
{
"name" : "date",
- "type" : "datetime",
+ "type" : "date",
"optional" : false,
"description" : "Date expression"
}
],
"variadic" : false,
- "returnType" : "datetime"
+ "returnType" : "date"
},
{
"params" : [
@@ -32,13 +32,13 @@
},
{
"name" : "date",
- "type" : "datetime",
+ "type" : "date",
"optional" : false,
"description" : "Date expression"
}
],
"variadic" : false,
- "returnType" : "datetime"
+ "returnType" : "date"
}
],
"examples" : [
diff --git a/docs/reference/esql/functions/kibana/definition/div.json b/docs/reference/esql/functions/kibana/definition/div.json
new file mode 100644
index 0000000000000..8bd2c33720d5f
--- /dev/null
+++ b/docs/reference/esql/functions/kibana/definition/div.json
@@ -0,0 +1,189 @@
+{
+ "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.",
+ "type" : "operator",
+ "name" : "div",
+ "description" : "Divide one number by another. If either field is <> then the result is `null`.",
+ "note" : "Division of two integer types will yield an integer result, rounding towards 0. If you need floating point division, <> one of the arguments to a `DOUBLE`.",
+ "signatures" : [
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "double",
+ "optional" : false,
+ "description" : "A numeric value."
+ },
+ {
+ "name" : "rhs",
+ "type" : "double",
+ "optional" : false,
+ "description" : "A numeric value."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "double"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "double",
+ "optional" : false,
+ "description" : "A numeric value."
+ },
+ {
+ "name" : "rhs",
+ "type" : "integer",
+ "optional" : false,
+ "description" : "A numeric value."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "double"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "double",
+ "optional" : false,
+ "description" : "A numeric value."
+ },
+ {
+ "name" : "rhs",
+ "type" : "long",
+ "optional" : false,
+ "description" : "A numeric value."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "double"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "integer",
+ "optional" : false,
+ "description" : "A numeric value."
+ },
+ {
+ "name" : "rhs",
+ "type" : "double",
+ "optional" : false,
+ "description" : "A numeric value."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "double"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "integer",
+ "optional" : false,
+ "description" : "A numeric value."
+ },
+ {
+ "name" : "rhs",
+ "type" : "integer",
+ "optional" : false,
+ "description" : "A numeric value."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "integer"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "integer",
+ "optional" : false,
+ "description" : "A numeric value."
+ },
+ {
+ "name" : "rhs",
+ "type" : "long",
+ "optional" : false,
+ "description" : "A numeric value."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "long"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "long",
+ "optional" : false,
+ "description" : "A numeric value."
+ },
+ {
+ "name" : "rhs",
+ "type" : "double",
+ "optional" : false,
+ "description" : "A numeric value."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "double"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "long",
+ "optional" : false,
+ "description" : "A numeric value."
+ },
+ {
+ "name" : "rhs",
+ "type" : "integer",
+ "optional" : false,
+ "description" : "A numeric value."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "long"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "long",
+ "optional" : false,
+ "description" : "A numeric value."
+ },
+ {
+ "name" : "rhs",
+ "type" : "long",
+ "optional" : false,
+ "description" : "A numeric value."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "long"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "unsigned_long",
+ "optional" : false,
+ "description" : "A numeric value."
+ },
+ {
+ "name" : "rhs",
+ "type" : "unsigned_long",
+ "optional" : false,
+ "description" : "A numeric value."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "unsigned_long"
+ }
+ ]
+}
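The note above on integer division is easy to trip over: dividing two integer types truncates towards 0. A minimal sketch that uses `TO_DOUBLE` to opt into floating-point division:

```esql
ROW a = 7, b = 2
| EVAL int_div   = a / b,              // 3: integer / integer truncates
       float_div = TO_DOUBLE(a) / b    // 3.5: cast one side to DOUBLE first
```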
diff --git a/docs/reference/esql/functions/kibana/definition/ends_with.json b/docs/reference/esql/functions/kibana/definition/ends_with.json
index b43181817ef9e..754d2c965287b 100644
--- a/docs/reference/esql/functions/kibana/definition/ends_with.json
+++ b/docs/reference/esql/functions/kibana/definition/ends_with.json
@@ -22,6 +22,42 @@
"variadic" : false,
"returnType" : "boolean"
},
+ {
+ "params" : [
+ {
+ "name" : "str",
+ "type" : "keyword",
+ "optional" : false,
+ "description" : "String expression. If `null`, the function returns `null`."
+ },
+ {
+ "name" : "suffix",
+ "type" : "text",
+ "optional" : false,
+ "description" : "String expression. If `null`, the function returns `null`."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "str",
+ "type" : "text",
+ "optional" : false,
+ "description" : "String expression. If `null`, the function returns `null`."
+ },
+ {
+ "name" : "suffix",
+ "type" : "keyword",
+ "optional" : false,
+ "description" : "String expression. If `null`, the function returns `null`."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
{
"params" : [
{
diff --git a/docs/reference/esql/functions/kibana/definition/equals.json b/docs/reference/esql/functions/kibana/definition/equals.json
new file mode 100644
index 0000000000000..eca80ccdbf657
--- /dev/null
+++ b/docs/reference/esql/functions/kibana/definition/equals.json
@@ -0,0 +1,405 @@
+{
+ "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.",
+ "type" : "operator",
+ "name" : "equals",
+ "description" : "Check if two fields are equal. If either field is <> then the result is `null`.",
+ "note" : "This is pushed to the underlying search index if one side of the comparison is constant and the other side is a field in the index that has both an <> and <>.",
+ "signatures" : [
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "boolean",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "boolean",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "cartesian_point",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "cartesian_point",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "cartesian_shape",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "cartesian_shape",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "date",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "date",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "double",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "double",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "double",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "integer",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "double",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "long",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "geo_point",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "geo_point",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "geo_shape",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "geo_shape",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "integer",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "double",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "integer",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "integer",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "integer",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "long",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "ip",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "ip",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "keyword",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "keyword",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "keyword",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "text",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "long",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "double",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "long",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "integer",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "long",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "long",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "text",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "keyword",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "text",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "text",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "unsigned_long",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "unsigned_long",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "version",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "version",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ }
+ ]
+}
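To make the pushdown note concrete: when one side of `==` is a constant and the other is an indexed field with doc values, the filter can be evaluated by the search index itself rather than row by row. A minimal sketch, assuming an `employees` index with a `first_name` keyword field:

```esql
FROM employees
| WHERE first_name == "Georgi"   // constant vs. indexed field: pushable
```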
diff --git a/docs/reference/esql/functions/kibana/definition/greater_than.json b/docs/reference/esql/functions/kibana/definition/greater_than.json
new file mode 100644
index 0000000000000..7831b0f41cd9d
--- /dev/null
+++ b/docs/reference/esql/functions/kibana/definition/greater_than.json
@@ -0,0 +1,315 @@
+{
+ "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.",
+ "type" : "operator",
+ "name" : "greater_than",
+ "description" : "Check if one field is greater than another. If either field is <> then the result is `null`.",
+ "note" : "This is pushed to the underlying search index if one side of the comparison is constant and the other side is a field in the index that has both an <> and <>.",
+ "signatures" : [
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "date",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "date",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "double",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "double",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "double",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "integer",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "double",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "long",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "integer",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "double",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "integer",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "integer",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "integer",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "long",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "ip",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "ip",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "keyword",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "keyword",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "keyword",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "text",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "long",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "double",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "long",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "integer",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "long",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "long",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "text",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "keyword",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "text",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "text",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "unsigned_long",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "unsigned_long",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "version",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "version",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ }
+ ]
+}
diff --git a/docs/reference/esql/functions/kibana/definition/greater_than_or_equal.json b/docs/reference/esql/functions/kibana/definition/greater_than_or_equal.json
new file mode 100644
index 0000000000000..b6a40a838c393
--- /dev/null
+++ b/docs/reference/esql/functions/kibana/definition/greater_than_or_equal.json
@@ -0,0 +1,315 @@
+{
+ "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.",
+ "type" : "operator",
+ "name" : "greater_than_or_equal",
+ "description" : "Check if one field is greater than or equal to another. If either field is <> then the result is `null`.",
+ "note" : "This is pushed to the underlying search index if one side of the comparison is constant and the other side is a field in the index that has both an <> and <>.",
+ "signatures" : [
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "date",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "date",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "double",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "double",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "double",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "integer",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "double",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "long",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "integer",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "double",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "integer",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "integer",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "integer",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "long",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "ip",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "ip",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "keyword",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "keyword",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "keyword",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "text",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "long",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "double",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "long",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "integer",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "long",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "long",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "text",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "keyword",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "text",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "text",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "unsigned_long",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "unsigned_long",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "version",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "version",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ }
+ ]
+}
diff --git a/docs/reference/esql/functions/kibana/definition/in.json b/docs/reference/esql/functions/kibana/definition/in.json
new file mode 100644
index 0000000000000..abf3bd64e2822
--- /dev/null
+++ b/docs/reference/esql/functions/kibana/definition/in.json
@@ -0,0 +1,263 @@
+{
+ "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.",
+ "type" : "operator",
+ "name" : "in",
+ "description" : "The `IN` operator allows testing whether a field or expression equals an element in a list of literals, fields or expressions.",
+ "signatures" : [
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "boolean",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "inlist",
+ "type" : "boolean",
+ "optional" : false,
+ "description" : "A list of items."
+ }
+ ],
+ "variadic" : true,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "cartesian_point",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "inlist",
+ "type" : "cartesian_point",
+ "optional" : false,
+ "description" : "A list of items."
+ }
+ ],
+ "variadic" : true,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "cartesian_shape",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "inlist",
+ "type" : "cartesian_shape",
+ "optional" : false,
+ "description" : "A list of items."
+ }
+ ],
+ "variadic" : true,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "double",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "inlist",
+ "type" : "double",
+ "optional" : false,
+ "description" : "A list of items."
+ }
+ ],
+ "variadic" : true,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "geo_point",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "inlist",
+ "type" : "geo_point",
+ "optional" : false,
+ "description" : "A list of items."
+ }
+ ],
+ "variadic" : true,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "geo_shape",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "inlist",
+ "type" : "geo_shape",
+ "optional" : false,
+ "description" : "A list of items."
+ }
+ ],
+ "variadic" : true,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "integer",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "inlist",
+ "type" : "integer",
+ "optional" : false,
+ "description" : "A list of items."
+ }
+ ],
+ "variadic" : true,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "ip",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "inlist",
+ "type" : "ip",
+ "optional" : false,
+ "description" : "A list of items."
+ }
+ ],
+ "variadic" : true,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "keyword",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "inlist",
+ "type" : "keyword",
+ "optional" : false,
+ "description" : "A list of items."
+ }
+ ],
+ "variadic" : true,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "keyword",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "inlist",
+ "type" : "text",
+ "optional" : false,
+ "description" : "A list of items."
+ }
+ ],
+ "variadic" : true,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "long",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "inlist",
+ "type" : "long",
+ "optional" : false,
+ "description" : "A list of items."
+ }
+ ],
+ "variadic" : true,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "text",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "inlist",
+ "type" : "keyword",
+ "optional" : false,
+ "description" : "A list of items."
+ }
+ ],
+ "variadic" : true,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "text",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "inlist",
+ "type" : "text",
+ "optional" : false,
+ "description" : "A list of items."
+ }
+ ],
+ "variadic" : true,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "version",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "inlist",
+ "type" : "version",
+ "optional" : false,
+ "description" : "A list of items."
+ }
+ ],
+ "variadic" : true,
+ "returnType" : "boolean"
+ }
+ ],
+ "examples" : [
+ "ROW a = 1, b = 4, c = 3\n| WHERE c-a IN (3, b / 2, a)"
+ ]
+}
diff --git a/docs/reference/esql/functions/kibana/definition/less_than.json b/docs/reference/esql/functions/kibana/definition/less_than.json
new file mode 100644
index 0000000000000..bf6b9c5c08774
--- /dev/null
+++ b/docs/reference/esql/functions/kibana/definition/less_than.json
@@ -0,0 +1,315 @@
+{
+ "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.",
+ "type" : "operator",
+ "name" : "less_than",
+ "description" : "Check if one field is less than another. If either field is <> then the result is `null`.",
+ "note" : "This is pushed to the underlying search index if one side of the comparison is constant and the other side is a field in the index that has both an <> and <>.",
+ "signatures" : [
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "date",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "date",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "double",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "double",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "double",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "integer",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "double",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "long",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "integer",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "double",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "integer",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "integer",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "integer",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "long",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "ip",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "ip",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "keyword",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "keyword",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "keyword",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "text",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "long",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "double",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "long",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "integer",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "long",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "long",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "text",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "keyword",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "text",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "text",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "unsigned_long",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "unsigned_long",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "version",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "version",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ }
+ ]
+}
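Unlike in.json above, this generated less_than definition ships with no "examples" array. As a rough illustration only — the employees index and its salary and first_name fields are borrowed from the examples used elsewhere in these generated docs, not from this file — an ES|QL query exercising the `<` operator could look like:

    FROM employees
    | WHERE salary < 50000
    | KEEP first_name, salary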
diff --git a/docs/reference/esql/functions/kibana/definition/less_than_or_equal.json b/docs/reference/esql/functions/kibana/definition/less_than_or_equal.json
new file mode 100644
index 0000000000000..4e57161887141
--- /dev/null
+++ b/docs/reference/esql/functions/kibana/definition/less_than_or_equal.json
@@ -0,0 +1,315 @@
+{
+ "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.",
+ "type" : "operator",
+ "name" : "less_than_or_equal",
+ "description" : "Check if one field is less than or equal to another. If either field is <> then the result is `null`.",
+ "note" : "This is pushed to the underlying search index if one side of the comparison is constant and the other side is a field in the index that has both an <> and <>.",
+ "signatures" : [
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "date",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "date",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "double",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "double",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "double",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "integer",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "double",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "long",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "integer",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "double",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "integer",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "integer",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "integer",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "long",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "ip",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "ip",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "keyword",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "keyword",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "keyword",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "text",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "long",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "double",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "long",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "integer",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "long",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "long",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "text",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "keyword",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "text",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "text",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "unsigned_long",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "unsigned_long",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "lhs",
+ "type" : "version",
+ "optional" : false,
+ "description" : "An expression."
+ },
+ {
+ "name" : "rhs",
+ "type" : "version",
+ "optional" : false,
+ "description" : "An expression."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "boolean"
+ }
+ ]
+}
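less_than_or_equal likewise carries no examples. Per its "note" field, the comparison can be pushed down to the search index only when one side is a constant, so a sketch of that pushdown-friendly shape (same assumed employees dataset as above) would be:

    FROM employees
    | WHERE languages <= 3
    | KEEP first_name, languages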
diff --git a/docs/reference/esql/functions/kibana/definition/like.json b/docs/reference/esql/functions/kibana/definition/like.json
new file mode 100644
index 0000000000000..9a215ff88e399
--- /dev/null
+++ b/docs/reference/esql/functions/kibana/definition/like.json
@@ -0,0 +1,47 @@
+{
+ "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.",
+ "type" : "operator",
+ "name" : "like",
+ "description" : "Use `LIKE` to filter data based on string patterns using wildcards. `LIKE`\nusually acts on a field placed on the left-hand side of the operator, but it can\nalso act on a constant (literal) expression. The right-hand side of the operator\nrepresents the pattern.\n\nThe following wildcard characters are supported:\n\n* `*` matches zero or more characters.\n* `?` matches one character.",
+ "signatures" : [
+ {
+ "params" : [
+ {
+ "name" : "str",
+ "type" : "keyword",
+ "optional" : false,
+ "description" : "A literal expression."
+ },
+ {
+ "name" : "pattern",
+ "type" : "keyword",
+ "optional" : false,
+ "description" : "Pattern."
+ }
+ ],
+ "variadic" : true,
+ "returnType" : "boolean"
+ },
+ {
+ "params" : [
+ {
+ "name" : "str",
+ "type" : "text",
+ "optional" : false,
+ "description" : "A literal expression."
+ },
+ {
+ "name" : "pattern",
+ "type" : "text",
+ "optional" : false,
+ "description" : "Pattern."
+ }
+ ],
+ "variadic" : true,
+ "returnType" : "boolean"
+ }
+ ],
+ "examples" : [
+ "FROM employees\n| WHERE first_name LIKE \"?b*\"\n| KEEP first_name, last_name"
+ ]
+}
diff --git a/docs/reference/esql/functions/kibana/definition/max.json b/docs/reference/esql/functions/kibana/definition/max.json
index 853cb9f9a97c3..b13d367d37345 100644
--- a/docs/reference/esql/functions/kibana/definition/max.json
+++ b/docs/reference/esql/functions/kibana/definition/max.json
@@ -20,13 +20,13 @@
"params" : [
{
"name" : "field",
- "type" : "datetime",
+ "type" : "date",
"optional" : false,
"description" : ""
}
],
"variadic" : false,
- "returnType" : "datetime"
+ "returnType" : "date"
},
{
"params" : [
@@ -64,6 +64,18 @@
"variadic" : false,
"returnType" : "ip"
},
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "keyword",
+ "optional" : false,
+ "description" : ""
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "keyword"
+ },
{
"params" : [
{
@@ -75,6 +87,30 @@
],
"variadic" : false,
"returnType" : "long"
+ },
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "text",
+ "optional" : false,
+ "description" : ""
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "text"
+ },
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "version",
+ "optional" : false,
+ "description" : ""
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "version"
}
],
"examples" : [
diff --git a/docs/reference/esql/functions/kibana/definition/median.json b/docs/reference/esql/functions/kibana/definition/median.json
new file mode 100644
index 0000000000000..4887a4497e813
--- /dev/null
+++ b/docs/reference/esql/functions/kibana/definition/median.json
@@ -0,0 +1,49 @@
+{
+ "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.",
+ "type" : "agg",
+ "name" : "median",
+ "description" : "The value that is greater than half of all values and less than half of all values, also known as the 50% <>.",
+ "note" : "Like <>, `MEDIAN` is <>.",
+ "signatures" : [
+ {
+ "params" : [
+ {
+ "name" : "number",
+ "type" : "double",
+ "optional" : false,
+ "description" : ""
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "double"
+ },
+ {
+ "params" : [
+ {
+ "name" : "number",
+ "type" : "integer",
+ "optional" : false,
+ "description" : ""
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "double"
+ },
+ {
+ "params" : [
+ {
+ "name" : "number",
+ "type" : "long",
+ "optional" : false,
+ "description" : ""
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "double"
+ }
+ ],
+ "examples" : [
+ "FROM employees\n| STATS MEDIAN(salary), PERCENTILE(salary, 50)",
+ "FROM employees\n| STATS median_max_salary_change = MEDIAN(MV_MAX(salary_change))"
+ ]
+}
diff --git a/docs/reference/esql/functions/kibana/definition/median_absolute_deviation.json b/docs/reference/esql/functions/kibana/definition/median_absolute_deviation.json
new file mode 100644
index 0000000000000..4a8b1cd30611f
--- /dev/null
+++ b/docs/reference/esql/functions/kibana/definition/median_absolute_deviation.json
@@ -0,0 +1,49 @@
+{
+ "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.",
+ "type" : "agg",
+ "name" : "median_absolute_deviation",
+ "description" : "Returns the median absolute deviation, a measure of variability. It is a robust statistic, meaning that it is useful for describing data that may have outliers, or may not be normally distributed. For such data it can be more descriptive than standard deviation.\n\nIt is calculated as the median of each data point's deviation from the median of the entire sample. That is, for a random variable `X`, the median absolute deviation is `median(|median(X) - X|)`.",
+ "note" : "Like <>, `MEDIAN_ABSOLUTE_DEVIATION` is <>.",
+ "signatures" : [
+ {
+ "params" : [
+ {
+ "name" : "number",
+ "type" : "double",
+ "optional" : false,
+ "description" : ""
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "double"
+ },
+ {
+ "params" : [
+ {
+ "name" : "number",
+ "type" : "integer",
+ "optional" : false,
+ "description" : ""
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "double"
+ },
+ {
+ "params" : [
+ {
+ "name" : "number",
+ "type" : "long",
+ "optional" : false,
+ "description" : ""
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "double"
+ }
+ ],
+ "examples" : [
+ "FROM employees\n| STATS MEDIAN(salary), MEDIAN_ABSOLUTE_DEVIATION(salary)",
+ "FROM employees\n| STATS m_a_d_max_salary_change = MEDIAN_ABSOLUTE_DEVIATION(MV_MAX(salary_change))"
+ ]
+}
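To make the median(|median(X) - X|) formula above concrete: for the values {1, 2, 3, 4, 100}, the median is 3, the absolute deviations from it are {2, 1, 0, 1, 97}, and the median of those deviations — the MAD — is 1. The outlier 100 barely moves the result, whereas it would dominate the standard deviation.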
diff --git a/docs/reference/esql/functions/kibana/definition/min.json b/docs/reference/esql/functions/kibana/definition/min.json
index 1c0c02eb9860f..338ed10d67b2e 100644
--- a/docs/reference/esql/functions/kibana/definition/min.json
+++ b/docs/reference/esql/functions/kibana/definition/min.json
@@ -20,13 +20,13 @@
"params" : [
{
"name" : "field",
- "type" : "datetime",
+ "type" : "date",
"optional" : false,
"description" : ""
}
],
"variadic" : false,
- "returnType" : "datetime"
+ "returnType" : "date"
},
{
"params" : [
@@ -64,6 +64,18 @@
"variadic" : false,
"returnType" : "ip"
},
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "keyword",
+ "optional" : false,
+ "description" : ""
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "keyword"
+ },
{
"params" : [
{
@@ -75,6 +87,30 @@
],
"variadic" : false,
"returnType" : "long"
+ },
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "text",
+ "optional" : false,
+ "description" : ""
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "text"
+ },
+ {
+ "params" : [
+ {
+ "name" : "field",
+ "type" : "version",
+ "optional" : false,
+ "description" : ""
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "version"
}
],
"examples" : [
diff --git a/docs/reference/esql/functions/kibana/definition/mod.json b/docs/reference/esql/functions/kibana/definition/mod.json
new file mode 100644
index 0000000000000..c43f697127249
--- /dev/null
+++ b/docs/reference/esql/functions/kibana/definition/mod.json
@@ -0,0 +1,188 @@
+{
+ "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.",
+ "type" : "operator",
+ "name" : "mod",
+ "description" : "Divide one number by another and return the remainder. If either field is <