diff --git a/docs/changelog/108088.yaml b/docs/changelog/108088.yaml
new file mode 100644
index 0000000000000..95c58f6dc19f1
--- /dev/null
+++ b/docs/changelog/108088.yaml
@@ -0,0 +1,5 @@
+pr: 108088
+summary: Add a SIMD (AVX2) optimised vector distance function for int7 on x64
+area: "Search"
+type: enhancement
+issues: []
diff --git a/docs/changelog/108459.yaml b/docs/changelog/108459.yaml
new file mode 100644
index 0000000000000..5e05797f284be
--- /dev/null
+++ b/docs/changelog/108459.yaml
@@ -0,0 +1,6 @@
+pr: 108459
+summary: Do not use global ordinals strategy if the leaf reader context cannot be
+ obtained
+area: Machine Learning
+type: bug
+issues: []
diff --git a/docs/reference/esql/functions/date-time-functions.asciidoc b/docs/reference/esql/functions/date-time-functions.asciidoc
index 8ce26eaabe381..eceb6378426a2 100644
--- a/docs/reference/esql/functions/date-time-functions.asciidoc
+++ b/docs/reference/esql/functions/date-time-functions.asciidoc
@@ -21,4 +21,4 @@ include::layout/date_extract.asciidoc[]
include::layout/date_format.asciidoc[]
include::layout/date_parse.asciidoc[]
include::layout/date_trunc.asciidoc[]
-include::now.asciidoc[]
+include::layout/now.asciidoc[]
diff --git a/docs/reference/esql/functions/description/now.asciidoc b/docs/reference/esql/functions/description/now.asciidoc
new file mode 100644
index 0000000000000..4852c98b4980a
--- /dev/null
+++ b/docs/reference/esql/functions/description/now.asciidoc
@@ -0,0 +1,5 @@
+// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.
+
+*Description*
+
+Returns current date and time.
diff --git a/docs/reference/esql/functions/examples/now.asciidoc b/docs/reference/esql/functions/examples/now.asciidoc
new file mode 100644
index 0000000000000..b8953de93724c
--- /dev/null
+++ b/docs/reference/esql/functions/examples/now.asciidoc
@@ -0,0 +1,22 @@
+// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.
+
+*Examples*
+
+[source.merge.styled,esql]
+----
+include::{esql-specs}/date.csv-spec[tag=docsNow]
+----
+[%header.monospaced.styled,format=dsv,separator=|]
+|===
+include::{esql-specs}/date.csv-spec[tag=docsNow-result]
+|===
+To retrieve logs from the last hour:
+[source.merge.styled,esql]
+----
+include::{esql-specs}/date.csv-spec[tag=docsNowWhere]
+----
+[%header.monospaced.styled,format=dsv,separator=|]
+|===
+include::{esql-specs}/date.csv-spec[tag=docsNowWhere-result]
+|===
+
diff --git a/docs/reference/esql/functions/kibana/definition/now.json b/docs/reference/esql/functions/kibana/definition/now.json
new file mode 100644
index 0000000000000..9cdb4945afa2e
--- /dev/null
+++ b/docs/reference/esql/functions/kibana/definition/now.json
@@ -0,0 +1,16 @@
+{
+ "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.",
+ "type" : "eval",
+ "name" : "now",
+ "description" : "Returns current date and time.",
+ "signatures" : [
+ {
+ "params" : [ ],
+ "returnType" : "datetime"
+ }
+ ],
+ "examples" : [
+ "ROW current_date = NOW()",
+ "FROM sample_data\n| WHERE @timestamp > NOW() - 1 hour"
+ ]
+}
diff --git a/docs/reference/esql/functions/kibana/docs/now.md b/docs/reference/esql/functions/kibana/docs/now.md
new file mode 100644
index 0000000000000..5143dc843ebd8
--- /dev/null
+++ b/docs/reference/esql/functions/kibana/docs/now.md
@@ -0,0 +1,10 @@
+<!--
+This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.
+-->
+
+### NOW
+Returns current date and time.
+
+```
+ROW current_date = NOW()
+```
diff --git a/docs/reference/esql/functions/layout/now.asciidoc b/docs/reference/esql/functions/layout/now.asciidoc
new file mode 100644
index 0000000000000..52341c1665619
--- /dev/null
+++ b/docs/reference/esql/functions/layout/now.asciidoc
@@ -0,0 +1,15 @@
+// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.
+
+[discrete]
+[[esql-now]]
+=== `NOW`
+
+*Syntax*
+
+[.text-center]
+image::esql/functions/signature/now.svg[Embedded,opts=inline]
+
+include::../parameters/now.asciidoc[]
+include::../description/now.asciidoc[]
+include::../types/now.asciidoc[]
+include::../examples/now.asciidoc[]
diff --git a/docs/reference/esql/functions/now.asciidoc b/docs/reference/esql/functions/now.asciidoc
deleted file mode 100644
index 3c46f557acd1f..0000000000000
--- a/docs/reference/esql/functions/now.asciidoc
+++ /dev/null
@@ -1,28 +0,0 @@
-[discrete]
-[[esql-now]]
-=== `NOW`
-
-*Syntax*
-
-[source,esql]
-----
-NOW()
-----
-
-*Description*
-
-Returns current date and time.
-
-*Example*
-
-[source,esql]
-----
-include::{esql-specs}/date.csv-spec[tag=docsNow]
-----
-
-To retrieve logs from the last hour:
-
-[source,esql]
-----
-include::{esql-specs}/date.csv-spec[tag=docsNowWhere]
-----
\ No newline at end of file
diff --git a/docs/reference/esql/functions/parameters/now.asciidoc b/docs/reference/esql/functions/parameters/now.asciidoc
new file mode 100644
index 0000000000000..25b3c973f1a26
--- /dev/null
+++ b/docs/reference/esql/functions/parameters/now.asciidoc
@@ -0,0 +1,3 @@
+// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.
+
+*Parameters*
diff --git a/docs/reference/esql/functions/signature/now.svg b/docs/reference/esql/functions/signature/now.svg
new file mode 100644
index 0000000000000..2cd48ac561408
--- /dev/null
+++ b/docs/reference/esql/functions/signature/now.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/docs/reference/esql/functions/types/now.asciidoc b/docs/reference/esql/functions/types/now.asciidoc
new file mode 100644
index 0000000000000..5737d98f2f7db
--- /dev/null
+++ b/docs/reference/esql/functions/types/now.asciidoc
@@ -0,0 +1,9 @@
+// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.
+
+*Supported types*
+
+[%header.monospaced.styled,format=dsv,separator=|]
+|===
+result
+datetime
+|===
diff --git a/libs/native/libraries/build.gradle b/libs/native/libraries/build.gradle
index 168eb533fea74..7a545787bbdae 100644
--- a/libs/native/libraries/build.gradle
+++ b/libs/native/libraries/build.gradle
@@ -18,7 +18,7 @@ configurations {
}
var zstdVersion = "1.5.5"
-var vecVersion = "1.0.6"
+var vecVersion = "1.0.8"
repositories {
exclusiveContent {
diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/PosixNativeAccess.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/PosixNativeAccess.java
index 56017d3a8a20a..c390cfc9289c6 100644
--- a/libs/native/src/main/java/org/elasticsearch/nativeaccess/PosixNativeAccess.java
+++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/PosixNativeAccess.java
@@ -45,7 +45,15 @@ public Optional<VectorSimilarityFunctions> getVectorSimilarityFunctions() {
}
static boolean isNativeVectorLibSupported() {
- return Runtime.version().feature() >= 21 && isMacOrLinuxAarch64() && checkEnableSystemProperty();
+ return Runtime.version().feature() >= 21 && (isMacOrLinuxAarch64() || isLinuxAmd64()) && checkEnableSystemProperty();
+ }
+
+ /**
+ * Returns true iff the architecture is x64 (amd64) and the OS Linux (the OS we currently support for the native lib).
+ */
+ static boolean isLinuxAmd64() {
+ String name = System.getProperty("os.name");
+ return (name.startsWith("Linux")) && System.getProperty("os.arch").equals("amd64");
}
/** Returns true iff the OS is Mac or Linux, and the architecture is aarch64. */
diff --git a/libs/native/src/test/java/org/elasticsearch/nativeaccess/VectorSimilarityFunctionsTests.java b/libs/native/src/test/java/org/elasticsearch/nativeaccess/VectorSimilarityFunctionsTests.java
index adf32874c04f1..8c4cbb688abcd 100644
--- a/libs/native/src/test/java/org/elasticsearch/nativeaccess/VectorSimilarityFunctionsTests.java
+++ b/libs/native/src/test/java/org/elasticsearch/nativeaccess/VectorSimilarityFunctionsTests.java
@@ -37,7 +37,9 @@ public boolean supported() {
var arch = System.getProperty("os.arch");
var osName = System.getProperty("os.name");
- if (jdkVersion >= 21 && arch.equals("aarch64") && (osName.startsWith("Mac") || osName.equals("Linux"))) {
+ if (jdkVersion >= 21
+ && ((arch.equals("aarch64") && (osName.startsWith("Mac") || osName.equals("Linux")))
+ || (arch.equals("amd64") && osName.equals("Linux")))) {
assertThat(vectorSimilarityFunctions, isPresent());
return true;
} else {
diff --git a/libs/vec/native/Dockerfile b/libs/vec/native/Dockerfile
index 25dcf4d4854d0..66eb7e92ef479 100644
--- a/libs/vec/native/Dockerfile
+++ b/libs/vec/native/Dockerfile
@@ -4,6 +4,7 @@ RUN apt update
RUN apt install -y gcc g++ openjdk-17-jdk
COPY . /workspace
WORKDIR /workspace
-RUN ./gradlew --quiet --console=plain clean vecSharedLibrary
+RUN ./gradlew --quiet --console=plain clean buildSharedLibrary
+RUN strip --strip-unneeded build/output/libvec.so
-CMD cat build/libs/vec/shared/libvec.so
+CMD cat build/output/libvec.so
diff --git a/libs/vec/native/build.gradle b/libs/vec/native/build.gradle
index 6a658da0644b7..7edf46d406862 100644
--- a/libs/vec/native/build.gradle
+++ b/libs/vec/native/build.gradle
@@ -12,9 +12,10 @@ var os = org.gradle.internal.os.OperatingSystem.current()
// To update this library run publish_vec_binaries.sh ( or ./gradlew vecSharedLibrary )
// Or
// For local development, build the docker image with:
-// docker build --platform linux/arm64 --progress=plain .
+// docker build --platform linux/arm64 --progress=plain . (for aarch64)
+// docker build --platform linux/amd64 --progress=plain . (for x64)
// Grab the image id from the console output, then, e.g.
-// docker run 9c9f36564c148b275aeecc42749e7b4580ded79dcf51ff6ccc008c8861e7a979 > build/libs/vec/shared/libvec.so
+// docker run 9c9f36564c148b275aeecc42749e7b4580ded79dcf51ff6ccc008c8861e7a979 > build/libs/vec/shared/$arch/libvec.so
//
// To run tests and benchmarks on a locally built libvec,
// 1. Temporarily comment out the download in libs/native/library/build.gradle
@@ -30,26 +31,83 @@ var os = org.gradle.internal.os.OperatingSystem.current()
group = 'org.elasticsearch'
+def platformName = System.getProperty("os.arch");
+
model {
+ platforms {
+ aarch64 {
+ architecture "aarch64"
+ }
+ amd64 {
+ architecture "x86-64"
+ }
+ }
toolChains {
gcc(Gcc) {
target("aarch64") {
cCompiler.executable = "/usr/bin/gcc"
+ cCompiler.withArguments { args -> args.addAll(["-O3", "-std=c99", "-march=armv8-a"]) }
+ }
+ target("amd64") {
+ cCompiler.executable = "/usr/bin/gcc"
+ cCompiler.withArguments { args -> args.addAll(["-O3", "-std=c99", "-march=core-avx2", "-Wno-incompatible-pointer-types"]) }
}
}
- clang(Clang)
- }
- platforms {
- aarch64 {
- architecture "aarch64"
+ cl(VisualCpp) {
+ eachPlatform { toolchain ->
+ def platform = toolchain.getPlatform()
+ if (platform.name == "x64") {
+ cCompiler.withArguments { args -> args.addAll(["/O2", "/LD", "-march=core-avx2"]) }
+ }
+ }
+ }
+ clang(Clang) {
+ target("amd64") {
+ cCompiler.withArguments { args -> args.addAll(["-O3", "-std=c99", "-march=core-avx2"]) }
+ }
}
}
components {
vec(NativeLibrarySpec) {
targetPlatform "aarch64"
- binaries.withType(SharedLibraryBinarySpec) {
- cCompiler.args "-O3", "-std=c99", "-march=armv8-a"
+ targetPlatform "amd64"
+
+ sources {
+ c {
+ source {
+ srcDir "src/vec/c/${platformName}/"
+ include "*.c"
+ }
+ exportedHeaders {
+ srcDir "src/vec/headers/"
+ }
+ }
+ }
+ }
+ }
+}
+
+tasks.register('buildSharedLibrary') {
+ description = 'Assembles native shared library for the host architecture'
+ if (platformName.equals("aarch64")) {
+ dependsOn tasks.vecAarch64SharedLibrary
+ doLast {
+ copy {
+ from tasks.linkVecAarch64SharedLibrary.outputs.files.files
+ into layout.buildDirectory.dir('output');
+ duplicatesStrategy = 'INCLUDE'
+ }
+ }
+ } else if (platformName.equals("amd64")) {
+ dependsOn tasks.vecAmd64SharedLibrary
+ doLast {
+ copy {
+ from tasks.linkVecAmd64SharedLibrary.outputs.files.files
+ into layout.buildDirectory.dir('output');
+ duplicatesStrategy = 'INCLUDE'
}
}
+ } else {
+ throw new GradleException("Unsupported platform: " + platformName)
}
}
diff --git a/libs/vec/native/publish_vec_binaries.sh b/libs/vec/native/publish_vec_binaries.sh
index e17690160e253..2ed6c750ab9e8 100755
--- a/libs/vec/native/publish_vec_binaries.sh
+++ b/libs/vec/native/publish_vec_binaries.sh
@@ -19,7 +19,7 @@ if [ -z "$ARTIFACTORY_API_KEY" ]; then
exit 1;
fi
-VERSION="1.0.6"
+VERSION="1.0.8"
ARTIFACTORY_REPOSITORY="${ARTIFACTORY_REPOSITORY:-https://artifactory.elastic.dev/artifactory/elasticsearch-native/}"
TEMP=$(mktemp -d)
@@ -29,16 +29,22 @@ if curl -sS -I --fail --location "${ARTIFACTORY_REPOSITORY}/org/elasticsearch/ve
fi
echo 'Building Darwin binary...'
-./gradlew --quiet --console=plain vecSharedLibrary
+./gradlew --quiet --console=plain vecAarch64SharedLibrary
echo 'Building Linux binary...'
DOCKER_IMAGE=$(docker build --platform linux/arm64 --quiet .)
-docker run $DOCKER_IMAGE > build/libs/vec/shared/libvec.so
+docker run $DOCKER_IMAGE > build/libs/vec/shared/aarch64/libvec.so
+
+echo 'Building Linux x64 binary...'
+DOCKER_IMAGE=$(docker build --platform linux/amd64 --quiet .)
+docker run --platform linux/amd64 $DOCKER_IMAGE > build/libs/vec/shared/amd64/libvec.so
mkdir -p $TEMP/darwin-aarch64
mkdir -p $TEMP/linux-aarch64
-cp build/libs/vec/shared/libvec.dylib $TEMP/darwin-aarch64/
-cp build/libs/vec/shared/libvec.so $TEMP/linux-aarch64/
+mkdir -p $TEMP/linux-x64
+cp build/libs/vec/shared/aarch64/libvec.dylib $TEMP/darwin-aarch64/
+cp build/libs/vec/shared/aarch64/libvec.so $TEMP/linux-aarch64/
+cp build/libs/vec/shared/amd64/libvec.so $TEMP/linux-x64/
echo 'Uploading to Artifactory...'
(cd $TEMP && zip -rq - .) | curl -sS -X PUT -H "X-JFrog-Art-Api: ${ARTIFACTORY_API_KEY}" --data-binary @- --location "${ARTIFACTORY_REPOSITORY}/org/elasticsearch/vec/${VERSION}/vec-${VERSION}.zip"
diff --git a/libs/vec/native/src/vec/c/vec.c b/libs/vec/native/src/vec/c/aarch64/vec.c
similarity index 99%
rename from libs/vec/native/src/vec/c/vec.c
rename to libs/vec/native/src/vec/c/aarch64/vec.c
index 05dfe64a3be9b..478e5e84d3859 100644
--- a/libs/vec/native/src/vec/c/vec.c
+++ b/libs/vec/native/src/vec/c/aarch64/vec.c
@@ -121,7 +121,7 @@ static inline int32_t sqr7u_inner(int8_t *a, int8_t *b, size_t dims) {
EXPORT int32_t sqr7u(int8_t* a, int8_t* b, size_t dims) {
int32_t res = 0;
int i = 0;
- if (i > SQR7U_STRIDE_BYTES_LEN) {
+ if (dims > SQR7U_STRIDE_BYTES_LEN) {
i += dims & ~(SQR7U_STRIDE_BYTES_LEN - 1);
res = sqr7u_inner(a, b, i);
}
diff --git a/libs/vec/native/src/vec/c/amd64/vec.c b/libs/vec/native/src/vec/c/amd64/vec.c
new file mode 100644
index 0000000000000..c9a49ad2d1d4d
--- /dev/null
+++ b/libs/vec/native/src/vec/c/amd64/vec.c
@@ -0,0 +1,150 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+#include <stddef.h>
+#include <stdint.h>
+#include "vec.h"
+
+#include <emmintrin.h>
+#include <immintrin.h>
+
+#ifndef DOT7U_STRIDE_BYTES_LEN
+#define DOT7U_STRIDE_BYTES_LEN 32 // Must be a power of 2
+#endif
+
+#ifndef SQR7U_STRIDE_BYTES_LEN
+#define SQR7U_STRIDE_BYTES_LEN 32 // Must be a power of 2
+#endif
+
+#ifdef _MSC_VER
+#include <intrin.h>
+#elif __GNUC__
+#include <x86intrin.h>
+#elif __clang__
+#include <x86intrin.h>
+#endif
+
+// Multi-platform CPUID "intrinsic"; it takes as input a "functionNumber" (or "leaf", the eax registry). "Subleaf"
+// is always 0. Output is stored in the passed output parameter: output[0] = eax, output[1] = ebx, output[2] = ecx,
+// output[3] = edx
+static inline void cpuid(int output[4], int functionNumber) {
+#if defined(__GNUC__) || defined(__clang__)
+ // use inline assembly, Gnu/AT&T syntax
+ int a, b, c, d;
+ __asm("cpuid" : "=a"(a), "=b"(b), "=c"(c), "=d"(d) : "a"(functionNumber), "c"(0) : );
+ output[0] = a;
+ output[1] = b;
+ output[2] = c;
+ output[3] = d;
+
+#elif defined (_MSC_VER)
+ __cpuidex(output, functionNumber, 0);
+#else
+ #error Unsupported compiler
+#endif
+}
+
+// Utility function to horizontally add 8 32-bit integers
+static inline int hsum_i32_8(const __m256i a) {
+ const __m128i sum128 = _mm_add_epi32(_mm256_castsi256_si128(a), _mm256_extractf128_si256(a, 1));
+ const __m128i hi64 = _mm_unpackhi_epi64(sum128, sum128);
+ const __m128i sum64 = _mm_add_epi32(hi64, sum128);
+ const __m128i hi32 = _mm_shuffle_epi32(sum64, _MM_SHUFFLE(2, 3, 0, 1));
+ return _mm_cvtsi128_si32(_mm_add_epi32(sum64, hi32));
+}
+
+EXPORT int vec_caps() {
+ int cpuInfo[4] = {-1};
+ // Calling __cpuid with 0x0 as the function_id argument
+ // gets the number of the highest valid function ID.
+ cpuid(cpuInfo, 0);
+ int functionIds = cpuInfo[0];
+ if (functionIds >= 7) {
+ cpuid(cpuInfo, 7);
+ int ebx = cpuInfo[1];
+ // AVX2 flag is the 5th bit
+ // We assume that all processors that have AVX2 also have FMA3
+ return (ebx & (1 << 5)) != 0;
+ }
+ return 0;
+}
+
+static inline int32_t dot7u_inner(int8_t* a, int8_t* b, size_t dims) {
+ const __m256i ones = _mm256_set1_epi16(1);
+
+ // Init accumulator(s) with 0
+ __m256i acc1 = _mm256_setzero_si256();
+
+#pragma GCC unroll 4
+ for(int i = 0; i < dims; i += DOT7U_STRIDE_BYTES_LEN) {
+ // Load packed 8-bit integers
+ __m256i va1 = _mm256_loadu_si256(a + i);
+ __m256i vb1 = _mm256_loadu_si256(b + i);
+
+ // Perform multiplication and create 16-bit values
+ // Vertically multiply each unsigned 8-bit integer from va with the corresponding
+ // 8-bit integer from vb, producing intermediate signed 16-bit integers.
+ const __m256i vab = _mm256_maddubs_epi16(va1, vb1);
+ // Horizontally add adjacent pairs of intermediate signed 16-bit integers, and pack the results.
+ acc1 = _mm256_add_epi32(_mm256_madd_epi16(ones, vab), acc1);
+ }
+
+ // reduce (horizontally add all)
+ return hsum_i32_8(acc1);
+}
+
+EXPORT int32_t dot7u(int8_t* a, int8_t* b, size_t dims) {
+ int32_t res = 0;
+ int i = 0;
+ if (dims > DOT7U_STRIDE_BYTES_LEN) {
+ i += dims & ~(DOT7U_STRIDE_BYTES_LEN - 1);
+ res = dot7u_inner(a, b, i);
+ }
+ for (; i < dims; i++) {
+ res += a[i] * b[i];
+ }
+ return res;
+}
+
+static inline int32_t sqr7u_inner(int8_t *a, int8_t *b, size_t dims) {
+ // Init accumulator(s) with 0
+ __m256i acc1 = _mm256_setzero_si256();
+
+ const __m256i ones = _mm256_set1_epi16(1);
+
+#pragma GCC unroll 4
+ for(int i = 0; i < dims; i += SQR7U_STRIDE_BYTES_LEN) {
+ // Load packed 8-bit integers
+ __m256i va1 = _mm256_loadu_si256(a + i);
+ __m256i vb1 = _mm256_loadu_si256(b + i);
+
+ const __m256i dist1 = _mm256_sub_epi8(va1, vb1);
+ const __m256i abs_dist1 = _mm256_sign_epi8(dist1, dist1);
+ const __m256i sqr1 = _mm256_maddubs_epi16(abs_dist1, abs_dist1);
+
+ acc1 = _mm256_add_epi32(_mm256_madd_epi16(ones, sqr1), acc1);
+ }
+
+ // reduce (accumulate all)
+ return hsum_i32_8(acc1);
+}
+
+EXPORT int32_t sqr7u(int8_t* a, int8_t* b, size_t dims) {
+ int32_t res = 0;
+ int i = 0;
+ if (dims > SQR7U_STRIDE_BYTES_LEN) {
+ i += dims & ~(SQR7U_STRIDE_BYTES_LEN - 1);
+ res = sqr7u_inner(a, b, i);
+ }
+ for (; i < dims; i++) {
+ int32_t dist = a[i] - b[i];
+ res += dist * dist;
+ }
+ return res;
+}
+
diff --git a/libs/vec/native/src/vec/headers/vec.h b/libs/vec/native/src/vec/headers/vec.h
index 5d3806dfccbe6..49fa29ec6fae9 100644
--- a/libs/vec/native/src/vec/headers/vec.h
+++ b/libs/vec/native/src/vec/headers/vec.h
@@ -7,7 +7,7 @@
*/
#ifdef _MSC_VER
-#define EXPORT extern "C" __declspec(dllexport)
+#define EXPORT __declspec(dllexport)
#elif defined(__GNUC__) && !defined(__clang__)
#define EXPORT __attribute__((externally_visible,visibility("default")))
#elif __clang__
diff --git a/libs/vec/src/test/java/org/elasticsearch/vec/AbstractVectorTestCase.java b/libs/vec/src/test/java/org/elasticsearch/vec/AbstractVectorTestCase.java
index 771f665fb4084..13f2d5a03ec76 100644
--- a/libs/vec/src/test/java/org/elasticsearch/vec/AbstractVectorTestCase.java
+++ b/libs/vec/src/test/java/org/elasticsearch/vec/AbstractVectorTestCase.java
@@ -39,7 +39,9 @@ public static boolean supported() {
var arch = System.getProperty("os.arch");
var osName = System.getProperty("os.name");
- if (jdkVersion >= 21 && arch.equals("aarch64") && (osName.startsWith("Mac") || osName.equals("Linux"))) {
+ if (jdkVersion >= 21
+ && (arch.equals("aarch64") && (osName.startsWith("Mac") || osName.equals("Linux"))
+ || arch.equals("amd64") && osName.equals("Linux"))) {
assertThat(factory, isPresent());
return true;
} else {
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/capabilities.json b/rest-api-spec/src/main/resources/rest-api-spec/api/capabilities.json
new file mode 100644
index 0000000000000..28c341d9983cc
--- /dev/null
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/capabilities.json
@@ -0,0 +1,47 @@
+{
+ "capabilities": {
+ "documentation": {
+ "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/capabilities.html",
+ "description": "Checks if the specified combination of method, API, parameters, and arbitrary capabilities are supported"
+ },
+ "stability": "experimental",
+ "visibility": "private",
+ "headers": {
+ "accept": [
+ "application/json"
+ ]
+ },
+ "url": {
+ "paths": [
+ {
+ "path": "/_capabilities",
+ "methods": [
+ "GET"
+ ]
+ }
+ ]
+ },
+ "params": {
+ "method": {
+ "type": "enum",
+ "description": "REST method to check",
+ "options": [
+ "GET", "HEAD", "POST", "PUT", "DELETE"
+ ],
+ "default": "GET"
+ },
+ "path": {
+ "type": "string",
+ "description": "API path to check"
+ },
+ "parameters": {
+ "type": "string",
+ "description": "Comma-separated list of API parameters to check"
+ },
+ "capabilities": {
+ "type": "string",
+ "description": "Comma-separated list of arbitrary API capabilities to check"
+ }
+ }
+ }
+}
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/capabilities/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/capabilities/10_basic.yml
new file mode 100644
index 0000000000000..715e696bd1032
--- /dev/null
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/capabilities/10_basic.yml
@@ -0,0 +1,28 @@
+---
+"Capabilities API":
+
+ - requires:
+ capabilities:
+ - method: GET
+ path: /_capabilities
+ parameters: [method, path, parameters, capabilities]
+ capabilities: []
+ reason: "capabilities api requires itself to be supported"
+
+ - do:
+ capabilities:
+ method: GET
+ path: /_capabilities
+ parameters: method,path,parameters,capabilities
+ error_trace: false
+
+ - match: { supported: true }
+
+ - do:
+ capabilities:
+ method: GET
+ path: /_capabilities
+ parameters: unknown
+ error_trace: false
+
+ - match: { supported: false }
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.highlight/10_unified.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.highlight/10_unified.yml
index 3ae8f8b09aa4a..ca1d22e4a1ce7 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.highlight/10_unified.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.highlight/10_unified.yml
@@ -14,12 +14,26 @@ setup:
"postings":
"type": "text"
"index_options": "offsets"
+ "nested":
+ "type": "nested"
+ "properties":
+ "text":
+ "type": "text"
+ "vectors":
+ "type": "dense_vector"
+ "dims": 2
+ "index": true
+ "similarity": "l2_norm"
+
- do:
index:
index: test
id: "1"
body:
"text" : "The quick brown fox is brown."
+ "nested":
+ "text": "The quick brown fox is brown."
+ "vectors": [1, 2]
- do:
indices.refresh: {}
@@ -43,6 +57,7 @@ teardown:
"query" : { "multi_match" : { "query" : "quick brown fox", "fields" : [ "text*"] } },
"highlight" : { "type" : "unified", "fields" : { "*" : {} } } }
+ - length: { hits.hits.0.highlight: 3 }
- match: {hits.hits.0.highlight.text.0: "The quick brown fox is brown."}
- match: {hits.hits.0.highlight.text\.fvh.0: "The quick brown fox is brown."}
- match: {hits.hits.0.highlight.text\.postings.0: "The quick brown fox is brown."}
@@ -58,6 +73,7 @@ teardown:
"query" : { "combined_fields" : { "query" : "quick brown fox", "fields" : [ "text*"] } },
"highlight" : { "type" : "unified", "fields" : { "*" : {} } } }
+ - length: { hits.hits.0.highlight: 3 }
- match: {hits.hits.0.highlight.text.0: "The quick brown fox is brown."}
- match: {hits.hits.0.highlight.text\.fvh.0: "The quick brown fox is brown."}
- match: {hits.hits.0.highlight.text\.postings.0: "The quick brown fox is brown."}
@@ -72,11 +88,13 @@ teardown:
search:
body: {
"query": { "multi_match": { "query": "quick brown fox", "type": "phrase", "fields": [ "text*" ] } },
- "highlight": { "type": "unified", "fields": { "*": { } } } }
+ "highlight": { "type": "unified", "fields": { "*": { } } }
+ }
- - match: { hits.hits.0.highlight.text.0: "The quick brown fox is brown." }
- - match: { hits.hits.0.highlight.text\.fvh.0: "The quick brown fox is brown." }
- - match: { hits.hits.0.highlight.text\.postings.0: "The quick brown fox is brown." }
+ - length: { hits.hits.0.highlight: 3 }
+ - match: { hits.hits.0.highlight.text.0: "The quick brown fox is brown." }
+ - match: { hits.hits.0.highlight.text\.fvh.0: "The quick brown fox is brown." }
+ - match: { hits.hits.0.highlight.text\.postings.0: "The quick brown fox is brown." }
- do:
indices.put_settings:
@@ -90,6 +108,7 @@ teardown:
"query" : { "multi_match" : { "query" : "quick brown fox", "type": "phrase", "fields" : [ "text*"] } },
"highlight" : { "type" : "unified", "fields" : { "*" : {} } } }
+ - length: { hits.hits.0.highlight: 3 }
- match: {hits.hits.0.highlight.text.0: "The quick brown fox is brown."}
- match: {hits.hits.0.highlight.text\.fvh.0: "The quick brown fox is brown."}
- match: {hits.hits.0.highlight.text\.postings.0: "The quick brown fox is brown."}
@@ -100,43 +119,69 @@ teardown:
reason: 'kNN was not correctly skipped until 8.12'
- do:
- indices.create:
- index: test-highlighting-knn
- body:
- mappings:
- "properties":
- "vectors":
- "type": "dense_vector"
- "dims": 2
- "index": true
- "similarity": "l2_norm"
- "text":
- "type": "text"
- "fields":
- "fvh":
- "type": "text"
- "term_vector": "with_positions_offsets"
- "postings":
- "type": "text"
- "index_options": "offsets"
- - do:
- index:
- index: test-highlighting-knn
- id: "1"
- body:
- "text" : "The quick brown fox is brown."
- "vectors": [1, 2]
+ search:
+ index: test
+ body: {
+ "query": { "multi_match": { "query": "quick brown fox", "type": "phrase", "fields": [ "text*" ] } },
+ "highlight": { "type": "unified", "fields": { "text*": { } } },
+ "knn": { "field": "vectors", "query_vector": [1, 2], "k": 10, "num_candidates": 10 } }
+
+ - length: { hits.hits.0.highlight: 3 }
+ - match: { hits.hits.0.highlight.text.0: "The quick brown fox is brown." }
+ - match: { hits.hits.0.highlight.text\.fvh.0: "The quick brown fox is brown." }
+ - match: { hits.hits.0.highlight.text\.postings.0: "The quick brown fox is brown." }
+
+---
+"Test nested queries automatically disable weighted mode":
+ - requires:
+ cluster_features: "gte_v8.15.0"
+ reason: 'nested was not correctly skipped until 8.15'
+
- do:
- indices.refresh: {}
+ search:
+ index: test
+ body: {
+ "query": {
+ "nested": {
+ "path": "nested",
+ "query": {
+ "multi_match": {
+ "query": "quick brown fox",
+ "type": "phrase",
+ "fields": [ "nested.text" ]
+ }
+ }
+ }
+ },
+ "highlight": { "type": "unified", "fields": { "*": { } } }
+ }
+
+ - length: { hits.hits.0.highlight: 1 }
+ - match: { hits.hits.0.highlight.nested\.text.0: "The quick brown fox is brown." }
- do:
search:
- index: test-highlighting-knn
+ index: test
body: {
- "query": { "multi_match": { "query": "quick brown fox", "type": "phrase", "fields": [ "text*" ] } },
- "highlight": { "type": "unified", "fields": { "*": { } } },
- "knn": { "field": "vectors", "query_vector": [1, 2], "k": 10, "num_candidates": 10 } }
+ "query": {
+ "bool": {
+ "must_not": {
+ "nested": {
+ "path": "nested",
+ "query": {
+ "multi_match": { "query": "quick red fox", "type": "phrase", "fields": [ "nested.text" ] }
+ }
+ }
+ },
+ "should": {
+ "multi_match": { "query": "quick brown fox", "type": "phrase", "fields": [ "text*" ] }
+ }
+ }
+ },
+ "highlight": { "type": "unified", "fields": { "text*": { } } }
+ }
+ - length: { hits.hits.0.highlight: 3 }
- match: { hits.hits.0.highlight.text.0: "The quick brown fox is brown." }
- match: { hits.hits.0.highlight.text\.fvh.0: "The quick brown fox is brown." }
- match: { hits.hits.0.highlight.text\.postings.0: "The quick brown fox is brown." }
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/nodescapabilities/SimpleNodesCapabilitiesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/nodescapabilities/SimpleNodesCapabilitiesIT.java
index 7e4ae040caeca..9b60044c94f70 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/nodescapabilities/SimpleNodesCapabilitiesIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/nodescapabilities/SimpleNodesCapabilitiesIT.java
@@ -15,8 +15,8 @@
import java.io.IOException;
+import static org.elasticsearch.test.hamcrest.OptionalMatchers.isPresentWith;
import static org.hamcrest.Matchers.hasSize;
-import static org.hamcrest.Matchers.is;
@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0)
public class SimpleNodesCapabilitiesIT extends ESIntegTestCase {
@@ -31,25 +31,25 @@ public void testNodesCapabilities() throws IOException {
NodesCapabilitiesResponse response = clusterAdmin().nodesCapabilities(new NodesCapabilitiesRequest().path("_capabilities"))
.actionGet();
assertThat(response.getNodes(), hasSize(2));
- assertThat(response.isSupported(), is(true));
+ assertThat(response.isSupported(), isPresentWith(true));
// check we support some parameters of the capabilities API
response = clusterAdmin().nodesCapabilities(new NodesCapabilitiesRequest().path("_capabilities").parameters("method", "path"))
.actionGet();
assertThat(response.getNodes(), hasSize(2));
- assertThat(response.isSupported(), is(true));
+ assertThat(response.isSupported(), isPresentWith(true));
// check we don't support some other parameters of the capabilities API
response = clusterAdmin().nodesCapabilities(new NodesCapabilitiesRequest().path("_capabilities").parameters("method", "invalid"))
.actionGet();
assertThat(response.getNodes(), hasSize(2));
- assertThat(response.isSupported(), is(false));
+ assertThat(response.isSupported(), isPresentWith(false));
// check we don't support a random invalid api
// TODO this is not working yet - see https://github.com/elastic/elasticsearch/issues/107425
/*response = clusterAdmin().nodesCapabilities(new NodesCapabilitiesRequest().path("_invalid"))
.actionGet();
assertThat(response.getNodes(), hasSize(2));
- assertThat(response.isSupported(), is(false));*/
+ assertThat(response.isSupported(), isPresentWith(false));*/
}
}
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/capabilities/NodesCapabilitiesResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/capabilities/NodesCapabilitiesResponse.java
index 63fdb9f7da08a..c2acbf65f6e57 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/capabilities/NodesCapabilitiesResponse.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/capabilities/NodesCapabilitiesResponse.java
@@ -19,6 +19,7 @@
import java.io.IOException;
import java.util.List;
+import java.util.Optional;
public class NodesCapabilitiesResponse extends BaseNodesResponse<NodeCapability> implements ToXContentFragment {
protected NodesCapabilitiesResponse(ClusterName clusterName, List<NodeCapability> nodes, List<FailedNodeException> failures) {
@@ -35,12 +36,15 @@ protected void writeNodesTo(StreamOutput out, List<NodeCapability> nodes) throws
TransportAction.localOnly();
}
- public boolean isSupported() {
- return getNodes().isEmpty() == false && getNodes().stream().allMatch(NodeCapability::isSupported);
+ public Optional<Boolean> isSupported() {
+ // if there are any failures, we don't know if it is fully supported by all nodes in the cluster
+ if (hasFailures() || getNodes().isEmpty()) return Optional.empty();
+ return Optional.of(getNodes().stream().allMatch(NodeCapability::isSupported));
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
- return builder.field("supported", isSupported());
+ Optional<Boolean> supported = isSupported();
+ return builder.field("supported", supported.orElse(null));
}
}
diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java
index 51a8c6ddb3d76..a12d149bbe342 100644
--- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java
+++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java
@@ -1303,8 +1303,8 @@ public SearchPhase newSearchPhase(
task,
true,
searchService.getCoordinatorRewriteContextProvider(timeProvider::absoluteStartMillis),
- listener.delegateFailureAndWrap((l, iters) -> {
- SearchPhase action = newSearchPhase(
+ listener.delegateFailureAndWrap(
+ (l, iters) -> newSearchPhase(
task,
searchRequest,
executor,
@@ -1317,30 +1317,32 @@ public SearchPhase newSearchPhase(
false,
threadPool,
clusters
- );
- action.start();
- })
- );
- } else {
- // for synchronous CCS minimize_roundtrips=false, use the CCSSingleCoordinatorSearchProgressListener
- // (AsyncSearchTask will not return SearchProgressListener.NOOP, since it uses its own progress listener
- // which delegates to CCSSingleCoordinatorSearchProgressListener when minimizing roundtrips)
- if (clusters.isCcsMinimizeRoundtrips() == false
- && clusters.hasRemoteClusters()
- && task.getProgressListener() == SearchProgressListener.NOOP) {
- task.setProgressListener(new CCSSingleCoordinatorSearchProgressListener());
- }
- final SearchPhaseResults<SearchPhaseResult> queryResultConsumer = searchPhaseController.newSearchPhaseResults(
- executor,
- circuitBreaker,
- task::isCancelled,
- task.getProgressListener(),
- searchRequest,
- shardIterators.size(),
- exc -> searchTransportService.cancelSearchTask(task, "failed to merge result [" + exc.getMessage() + "]")
+ ).start()
+ )
);
+ }
+ // for synchronous CCS minimize_roundtrips=false, use the CCSSingleCoordinatorSearchProgressListener
+ // (AsyncSearchTask will not return SearchProgressListener.NOOP, since it uses its own progress listener
+ // which delegates to CCSSingleCoordinatorSearchProgressListener when minimizing roundtrips)
+ if (clusters.isCcsMinimizeRoundtrips() == false
+ && clusters.hasRemoteClusters()
+ && task.getProgressListener() == SearchProgressListener.NOOP) {
+ task.setProgressListener(new CCSSingleCoordinatorSearchProgressListener());
+ }
+ final SearchPhaseResults<SearchPhaseResult> queryResultConsumer = searchPhaseController.newSearchPhaseResults(
+ executor,
+ circuitBreaker,
+ task::isCancelled,
+ task.getProgressListener(),
+ searchRequest,
+ shardIterators.size(),
+ exc -> searchTransportService.cancelSearchTask(task, "failed to merge result [" + exc.getMessage() + "]")
+ );
+ boolean success = false;
+ try {
+ final SearchPhase searchPhase;
if (searchRequest.searchType() == DFS_QUERY_THEN_FETCH) {
- return new SearchDfsQueryThenFetchAsyncAction(
+ searchPhase = new SearchDfsQueryThenFetchAsyncAction(
logger,
namedWriteableRegistry,
searchTransportService,
@@ -1359,7 +1361,7 @@ public SearchPhase newSearchPhase(
);
} else {
assert searchRequest.searchType() == QUERY_THEN_FETCH : searchRequest.searchType();
- return new SearchQueryThenFetchAsyncAction(
+ searchPhase = new SearchQueryThenFetchAsyncAction(
logger,
namedWriteableRegistry,
searchTransportService,
@@ -1377,6 +1379,12 @@ public SearchPhase newSearchPhase(
clusters
);
}
+ success = true;
+ return searchPhase;
+ } finally {
+ if (success == false) {
+ queryResultConsumer.close();
+ }
}
}
}
diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelper.java b/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelper.java
index c2cd403836593..e81d8d73af9a2 100644
--- a/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelper.java
+++ b/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelper.java
@@ -43,9 +43,16 @@
import static org.elasticsearch.cluster.coordination.ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING;
import static org.elasticsearch.monitor.StatusInfo.Status.UNHEALTHY;
+/**
+ * Handles periodic debug logging of information regarding why the cluster has failed to form.
+ * Periodic logging begins once {@link #start()} is called, and ceases on {@link #stop()}.
+ */
public class ClusterFormationFailureHelper {
private static final Logger logger = LogManager.getLogger(ClusterFormationFailureHelper.class);
+ /**
+ * This time period controls how often warning log messages will be written if this node fails to join or form a cluster.
+ */
public static final Setting<TimeValue> DISCOVERY_CLUSTER_FORMATION_WARNING_TIMEOUT_SETTING = Setting.timeSetting(
"discovery.cluster_formation_warning_timeout",
TimeValue.timeValueMillis(10000),
@@ -61,6 +68,16 @@ public class ClusterFormationFailureHelper {
@Nullable // if no warning is scheduled
private volatile WarningScheduler warningScheduler;
+ /**
+ * Works with the {@link JoinHelper} to log the latest node-join attempt failure and cluster state debug information. Must call
+ * {@link ClusterFormationState#start()} to begin.
+ *
+ * @param settings provides the period in which to log cluster formation errors.
+ * @param clusterFormationStateSupplier information about the current believed cluster state (See {@link ClusterFormationState})
+ * @param threadPool the thread pool on which to run debug logging
+ * @param logLastFailedJoinAttempt invokes an instance of the JoinHelper to log the last encountered join failure
+ * (See {@link JoinHelper#logLastFailedJoinAttempt()})
+ */
public ClusterFormationFailureHelper(
Settings settings,
Supplier<ClusterFormationState> clusterFormationStateSupplier,
@@ -78,6 +95,10 @@ public boolean isRunning() {
return warningScheduler != null;
}
+ /**
+ * Schedules a warning debug message to be logged in 'clusterFormationWarningTimeout' time, and periodically thereafter, until
+ * {@link ClusterFormationState#stop()} has been called.
+ */
public void start() {
assert warningScheduler == null;
warningScheduler = new WarningScheduler();
@@ -129,7 +150,7 @@ public String toString() {
}
/**
- * If this node believes that cluster formation has failed, this record provides information that can be used to determine why that is.
+ * This record provides node state information that can be used to determine why cluster formation has failed.
*/
public record ClusterFormationState(
List<String> initialMasterNodesSetting,
diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java
index 156ba88a7d2b1..daff05f0fb19b 100644
--- a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java
+++ b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java
@@ -1781,7 +1781,7 @@ public void run() {
final var nodeEligibility = localNodeMayWinElection(lastAcceptedState, electionStrategy);
if (nodeEligibility.mayWin() == false) {
assert nodeEligibility.reason().isEmpty() == false;
- logger.trace(
+ logger.info(
"skip prevoting as local node may not win election ({}): {}",
nodeEligibility.reason(),
lastAcceptedState.coordinationMetadata()
diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinHelper.java b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinHelper.java
index b960bb02ceb7f..059400ad81cfb 100644
--- a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinHelper.java
+++ b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinHelper.java
@@ -194,13 +194,23 @@ private void unregisterAndReleaseConnection(DiscoveryNode destination, Releasabl
Releasables.close(connectionReference);
}
- // package-private for testing
+ /**
+ * Saves information about a join failure. The failure information may be logged later via either {@link FailedJoinAttempt#logNow}
+ * or {@link FailedJoinAttempt#lastFailedJoinAttempt}.
+ *
+ * Package-private for testing.
+ */
static class FailedJoinAttempt {
private final DiscoveryNode destination;
private final JoinRequest joinRequest;
private final ElasticsearchException exception;
private final long timestamp;
+ /**
+ * @param destination the master node targeted by the join request.
+ * @param joinRequest the join request that was sent to the perceived master node.
+ * @param exception the error response received in reply to the join request attempt.
+ */
FailedJoinAttempt(DiscoveryNode destination, JoinRequest joinRequest, ElasticsearchException exception) {
this.destination = destination;
this.joinRequest = joinRequest;
@@ -208,10 +218,18 @@ static class FailedJoinAttempt {
this.timestamp = System.nanoTime();
}
+ /**
+ * Logs the failed join attempt exception.
+ * {@link FailedJoinAttempt#getLogLevel(ElasticsearchException)} determines at what log-level the log is written.
+ */
void logNow() {
logger.log(getLogLevel(exception), () -> format("failed to join %s with %s", destination, joinRequest), exception);
}
+ /**
+ * Returns the appropriate log level based on the given exception. Every error is at least DEBUG, but unexpected errors are INFO.
+ * For example, NotMasterException and CircuitBreakingExceptions are DEBUG logs.
+ */
static Level getLogLevel(ElasticsearchException e) {
Throwable cause = e.unwrapCause();
if (cause instanceof CoordinationStateRejectedException
@@ -226,6 +244,10 @@ void logWarnWithTimestamp() {
logger.warn(
() -> format(
"last failed join attempt was %s ago, failed to join %s with %s",
+ // 'timestamp' is when this error exception was received by the local node. If the time that has passed since the error
+ // was originally received is quite large, it could indicate that this is a stale error exception from some prior
+ // out-of-order request response (where a later sent request but earlier received response was successful); or
+ // alternatively an old error could indicate that this node did not retry the join request for a very long time.
TimeValue.timeValueMillis(TimeValue.nsecToMSec(System.nanoTime() - timestamp)),
destination,
joinRequest
@@ -235,6 +257,9 @@ void logWarnWithTimestamp() {
}
}
+ /**
+ * Logs a warning message if {@link #lastFailedJoinAttempt} has been set with a failure.
+ */
void logLastFailedJoinAttempt() {
FailedJoinAttempt attempt = lastFailedJoinAttempt.get();
if (attempt != null) {
@@ -247,7 +272,7 @@ public void sendJoinRequest(DiscoveryNode destination, long term, Optional<Join>
assert destination.isMasterNode() : "trying to join master-ineligible " + destination;
final StatusInfo statusInfo = nodeHealthService.getHealth();
if (statusInfo.getStatus() == UNHEALTHY) {
- logger.debug("dropping join request to [{}]: [{}]", destination, statusInfo.getInfo());
+ logger.debug("dropping join request to [{}], unhealthy status: [{}]", destination, statusInfo.getInfo());
return;
}
final JoinRequest joinRequest = new JoinRequest(
diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/NodeJoinExecutor.java b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeJoinExecutor.java
index 2c024063e2399..9223e02fc946c 100644
--- a/server/src/main/java/org/elasticsearch/cluster/coordination/NodeJoinExecutor.java
+++ b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeJoinExecutor.java
@@ -26,6 +26,7 @@
import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.version.CompatibilityVersions;
import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.Strings;
import org.elasticsearch.features.FeatureService;
import org.elasticsearch.index.IndexVersion;
import org.elasticsearch.index.IndexVersions;
@@ -123,7 +124,14 @@ public ClusterState execute(BatchExecutionContext<JoinTask> batchExecutionContex
newState = ClusterState.builder(initialState);
} else {
logger.trace("processing node joins, but we are not the master. current master: {}", currentNodes.getMasterNode());
- throw new NotMasterException("Node [" + currentNodes.getLocalNode() + "] not master for join request");
+ throw new NotMasterException(
+ Strings.format(
+ "Node [%s] not master for join request. Current known master [%s], current term [%d]",
+ currentNodes.getLocalNode(),
+ currentNodes.getMasterNode(),
+ term
+ )
+ );
}
DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(newState.nodes());
diff --git a/server/src/main/java/org/elasticsearch/lucene/search/uhighlight/CustomUnifiedHighlighter.java b/server/src/main/java/org/elasticsearch/lucene/search/uhighlight/CustomUnifiedHighlighter.java
index 5c1381f730013..c29e248b1a689 100644
--- a/server/src/main/java/org/elasticsearch/lucene/search/uhighlight/CustomUnifiedHighlighter.java
+++ b/server/src/main/java/org/elasticsearch/lucene/search/uhighlight/CustomUnifiedHighlighter.java
@@ -293,7 +293,8 @@ public QueryVisitor getSubVisitor(BooleanClause.Occur occur, Query parent) {
if (parent instanceof ESToParentBlockJoinQuery) {
hasUnknownLeaf[0] = true;
}
- return super.getSubVisitor(occur, parent);
+ // we want to visit all queries, including those within the must_not clauses.
+ return this;
}
});
return hasUnknownLeaf[0];
diff --git a/server/src/main/java/org/elasticsearch/rest/BaseRestHandler.java b/server/src/main/java/org/elasticsearch/rest/BaseRestHandler.java
index 70801cdef560b..b142e4d567c04 100644
--- a/server/src/main/java/org/elasticsearch/rest/BaseRestHandler.java
+++ b/server/src/main/java/org/elasticsearch/rest/BaseRestHandler.java
@@ -76,13 +76,18 @@ public final long getUsageCount() {
@Override
public abstract List<Route> routes();
+ private static final Set<String> ALWAYS_SUPPORTED = Set.of("format", "filter_path", "pretty", "human");
+
@Override
public final void handleRequest(RestRequest request, RestChannel channel, NodeClient client) throws Exception {
// check if the query has any parameters that are not in the supported set (if declared)
Set<String> supported = supportedQueryParameters();
- if (supported != null && supported.containsAll(request.params().keySet()) == false) {
- Set<String> unsupported = Sets.difference(request.params().keySet(), supported);
- throw new IllegalArgumentException(unrecognized(request, unsupported, supported, "parameter"));
+ if (supported != null) {
+ var allSupported = Sets.union(ALWAYS_SUPPORTED, supported);
+ if (allSupported.containsAll(request.params().keySet()) == false) {
+ Set<String> unsupported = Sets.difference(request.params().keySet(), allSupported);
+ throw new IllegalArgumentException(unrecognized(request, unsupported, allSupported, "parameter"));
+ }
}
// prepare the request for execution; has the side effect of touching the request parameters
diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/FetchSourcePhase.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/FetchSourcePhase.java
index 3b8e4e69d9318..68e46186e4505 100644
--- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/FetchSourcePhase.java
+++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/FetchSourcePhase.java
@@ -28,7 +28,7 @@ public FetchSubPhaseProcessor getProcessor(FetchContext fetchContext) {
}
assert fetchSourceContext.fetchSource();
SourceFilter sourceFilter = fetchSourceContext.filter();
-
+ final boolean filterExcludesAll = sourceFilter.excludesAll();
return new FetchSubPhaseProcessor() {
private int fastPath;
@@ -67,8 +67,13 @@ private void hitExecute(FetchSourceContext fetchSourceContext, HitContext hitCon
return;
}
- // Otherwise, filter the source and add it to the hit.
- source = source.filter(sourceFilter);
+ if (filterExcludesAll) {
+ // we can just add an empty map
+ source = Source.empty(source.sourceContentType());
+ } else {
+ // Otherwise, filter the source and add it to the hit.
+ source = source.filter(sourceFilter);
+ }
if (nestedHit) {
source = extractNested(source, hitContext.hit().getNestedIdentity());
}
diff --git a/server/src/main/java/org/elasticsearch/search/internal/SearchContext.java b/server/src/main/java/org/elasticsearch/search/internal/SearchContext.java
index 232c12e944a96..35f96ee2dc102 100644
--- a/server/src/main/java/org/elasticsearch/search/internal/SearchContext.java
+++ b/server/src/main/java/org/elasticsearch/search/internal/SearchContext.java
@@ -351,6 +351,7 @@ public Query rewrittenQuery() {
* Adds a releasable that will be freed when this context is closed.
*/
public void addReleasable(Releasable releasable) { // TODO most Releasables are managed by their callers. We probably don't need this.
+ assert closed.get() == false;
releasables.add(releasable);
}
diff --git a/server/src/main/java/org/elasticsearch/search/lookup/SourceFilter.java b/server/src/main/java/org/elasticsearch/search/lookup/SourceFilter.java
index 3bf32159c1676..ceffb32c08b48 100644
--- a/server/src/main/java/org/elasticsearch/search/lookup/SourceFilter.java
+++ b/server/src/main/java/org/elasticsearch/search/lookup/SourceFilter.java
@@ -109,4 +109,8 @@ private Function