diff --git a/.buildkite/pipelines/intake.yml b/.buildkite/pipelines/intake.yml
index 6c8b8edfcbac1..4bc72aec20972 100644
--- a/.buildkite/pipelines/intake.yml
+++ b/.buildkite/pipelines/intake.yml
@@ -56,7 +56,7 @@ steps:
     timeout_in_minutes: 300
     matrix:
       setup:
-        BWC_VERSION: ["8.16.2", "8.17.0", "8.18.0", "9.0.0"]
+        BWC_VERSION: ["8.15.6", "8.16.2", "8.17.0", "8.18.0", "9.0.0"]
     agents:
       provider: gcp
       image: family/elasticsearch-ubuntu-2004
diff --git a/.buildkite/pipelines/periodic-packaging.template.yml b/.buildkite/pipelines/periodic-packaging.template.yml
index 1a1e46d55f7a4..aff0add62a2b6 100644
--- a/.buildkite/pipelines/periodic-packaging.template.yml
+++ b/.buildkite/pipelines/periodic-packaging.template.yml
@@ -7,19 +7,14 @@ steps:
         matrix:
           setup:
             image:
-              - debian-11
               - debian-12
               - opensuse-leap-15
-              - oraclelinux-7
               - oraclelinux-8
-              - sles-12
               - sles-15
-              - ubuntu-1804
               - ubuntu-2004
               - ubuntu-2204
               - rocky-8
               - rocky-9
-              - rhel-7
               - rhel-8
               - rhel-9
               - almalinux-8
diff --git a/.buildkite/pipelines/periodic-packaging.yml b/.buildkite/pipelines/periodic-packaging.yml
index c1b10a46c62a7..9bcd61ac1273c 100644
--- a/.buildkite/pipelines/periodic-packaging.yml
+++ b/.buildkite/pipelines/periodic-packaging.yml
@@ -8,19 +8,14 @@ steps:
         matrix:
           setup:
             image:
-              - debian-11
               - debian-12
               - opensuse-leap-15
-              - oraclelinux-7
               - oraclelinux-8
-              - sles-12
               - sles-15
-              - ubuntu-1804
               - ubuntu-2004
               - ubuntu-2204
               - rocky-8
               - rocky-9
-              - rhel-7
               - rhel-8
               - rhel-9
               - almalinux-8
diff --git a/.buildkite/pipelines/periodic-platform-support.yml b/.buildkite/pipelines/periodic-platform-support.yml
index 79e5a2e8dcdbb..8bee3a78f8316 100644
--- a/.buildkite/pipelines/periodic-platform-support.yml
+++ b/.buildkite/pipelines/periodic-platform-support.yml
@@ -7,19 +7,14 @@ steps:
         matrix:
           setup:
             image:
-              - debian-11
               - debian-12
               - opensuse-leap-15
-              - oraclelinux-7
               - oraclelinux-8
-              - sles-12
               - sles-15
-              - ubuntu-1804
               - ubuntu-2004
               - ubuntu-2204
               - rocky-8
               - rocky-9
-              - rhel-7
               - rhel-8
               - rhel-9
               - almalinux-8
@@ -90,7 +85,6 @@ steps:
           setup:
             image:
               - amazonlinux-2023
-              - amazonlinux-2
         agents:
           provider: aws
           imagePrefix: elasticsearch-{{matrix.image}}
diff --git a/.buildkite/pipelines/periodic.yml b/.buildkite/pipelines/periodic.yml
index 69d11ef1dabb6..3d6095d0b9e63 100644
--- a/.buildkite/pipelines/periodic.yml
+++ b/.buildkite/pipelines/periodic.yml
@@ -448,7 +448,7 @@ steps:
       setup:
         ES_RUNTIME_JAVA:
           - openjdk21
-        BWC_VERSION: ["8.16.2", "8.17.0", "8.18.0", "9.0.0"]
+        BWC_VERSION: ["8.15.6", "8.16.2", "8.17.0", "8.18.0", "9.0.0"]
     agents:
       provider: gcp
       image: family/elasticsearch-ubuntu-2004
@@ -490,7 +490,7 @@
         ES_RUNTIME_JAVA:
           - openjdk21
           - openjdk23
-        BWC_VERSION: ["8.16.2", "8.17.0", "8.18.0", "9.0.0"]
+        BWC_VERSION: ["8.15.6", "8.16.2", "8.17.0", "8.18.0", "9.0.0"]
     agents:
       provider: gcp
       image: family/elasticsearch-ubuntu-2004
diff --git a/.buildkite/pipelines/pull-request/packaging-tests-unix.yml b/.buildkite/pipelines/pull-request/packaging-tests-unix.yml
index ffc1350aceab3..6c7dadfd454ed 100644
--- a/.buildkite/pipelines/pull-request/packaging-tests-unix.yml
+++ b/.buildkite/pipelines/pull-request/packaging-tests-unix.yml
@@ -10,19 +10,14 @@ steps:
         matrix:
           setup:
             image:
-              - debian-11
               - debian-12
               - opensuse-leap-15
-              - oraclelinux-7
               - oraclelinux-8
-              - sles-12
               - sles-15
-              - ubuntu-1804
               - ubuntu-2004
               - ubuntu-2204
               - rocky-8
               - rocky-9
-              - rhel-7
               - rhel-8
               - rhel-9
               - almalinux-8
diff --git a/.ci/snapshotBwcVersions b/.ci/snapshotBwcVersions
index 5514fc376a285..f92881da7fea4 100644
--- a/.ci/snapshotBwcVersions
+++ b/.ci/snapshotBwcVersions
@@ -1,4 +1,5 @@
 BWC_VERSION:
+  - "8.15.6"
   - "8.16.2"
   - "8.17.0"
   - "8.18.0"
diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPluginFuncTest.groovy b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPluginFuncTest.groovy
index 6d080e1c80763..bb100b6b23882 100644
--- a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPluginFuncTest.groovy
+++ b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPluginFuncTest.groovy
@@ -9,9 +9,10 @@
 
 package org.elasticsearch.gradle.internal
 
+import spock.lang.Unroll
+
 import org.elasticsearch.gradle.fixtures.AbstractGitAwareGradleFuncTest
 import org.gradle.testkit.runner.TaskOutcome
-import spock.lang.Unroll
 
 class InternalDistributionBwcSetupPluginFuncTest extends AbstractGitAwareGradleFuncTest {
 
@@ -23,8 +24,10 @@ class InternalDistributionBwcSetupPluginFuncTest extends AbstractGitAwareGradleF
             apply plugin: 'elasticsearch.internal-distribution-bwc-setup'
         """
         execute("git branch origin/8.x", file("cloned"))
+        execute("git branch origin/8.3", file("cloned"))
+        execute("git branch origin/8.2", file("cloned"))
+        execute("git branch origin/8.1", file("cloned"))
         execute("git branch origin/7.16", file("cloned"))
-        execute("git branch origin/7.15", file("cloned"))
     }
 
     def "builds distribution from branches via archives extractedAssemble"() {
@@ -48,10 +51,11 @@ class InternalDistributionBwcSetupPluginFuncTest extends AbstractGitAwareGradleF
         assertOutputContains(result.output, "[$bwcDistVersion] > Task :distribution:archives:darwin-tar:${expectedAssembleTaskName}")
 
         where:
-        bwcDistVersion | bwcProject | expectedAssembleTaskName
-        "8.0.0"        | "minor"    | "extractedAssemble"
-        "7.16.0"       | "staged"   | "extractedAssemble"
-        "7.15.2"       | "bugfix"   | "extractedAssemble"
+        bwcDistVersion | bwcProject | expectedAssembleTaskName
+        "8.4.0"        | "minor"    | "extractedAssemble"
+        "8.3.0"        | "staged"   | "extractedAssemble"
+        "8.2.1"        | "bugfix"   | "extractedAssemble"
+        "8.1.3"        | "bugfix2"  | "extractedAssemble"
     }
 
     @Unroll
@@ -70,8 +74,8 @@ class InternalDistributionBwcSetupPluginFuncTest extends AbstractGitAwareGradleF
 
         where:
         bwcDistVersion | platform
-        "8.0.0"        | "darwin"
-        "8.0.0"        | "linux"
+        "8.4.0"        | "darwin"
+        "8.4.0"        | "linux"
     }
 
     def "bwc expanded distribution folder can be resolved as bwc project artifact"() {
@@ -107,11 +111,11 @@ class InternalDistributionBwcSetupPluginFuncTest extends AbstractGitAwareGradleF
         result.task(":resolveExpandedDistribution").outcome == TaskOutcome.SUCCESS
         result.task(":distribution:bwc:minor:buildBwcDarwinTar").outcome == TaskOutcome.SUCCESS
         and: "assemble task triggered"
-        result.output.contains("[8.0.0] > Task :distribution:archives:darwin-tar:extractedAssemble")
+        result.output.contains("[8.4.0] > Task :distribution:archives:darwin-tar:extractedAssemble")
         result.output.contains("expandedRootPath /distribution/bwc/minor/build/bwc/checkout-8.x/" +
             "distribution/archives/darwin-tar/build/install")
         result.output.contains("nested folder /distribution/bwc/minor/build/bwc/checkout-8.x/" +
-            "distribution/archives/darwin-tar/build/install/elasticsearch-8.0.0-SNAPSHOT")
+            "distribution/archives/darwin-tar/build/install/elasticsearch-8.4.0-SNAPSHOT")
     }
 }
diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/InternalDistributionDownloadPluginFuncTest.groovy b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/InternalDistributionDownloadPluginFuncTest.groovy
index eb6185e5aed57..fc5d432a9ef9a 100644
--- a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/InternalDistributionDownloadPluginFuncTest.groovy
+++ b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/InternalDistributionDownloadPluginFuncTest.groovy
@@ -57,7 +57,7 @@ class InternalDistributionDownloadPluginFuncTest extends AbstractGradleFuncTest
 
             elasticsearch_distributions {
               test_distro {
-                version = "8.0.0"
+                version = "8.4.0"
                 type = "archive"
                 platform = "linux"
                 architecture = Architecture.current();
@@ -87,7 +87,7 @@ class InternalDistributionDownloadPluginFuncTest extends AbstractGradleFuncTest
 
             elasticsearch_distributions {
               test_distro {
-                version = "8.0.0"
+                version = "8.4.0"
                 type = "archive"
                 platform = "linux"
                 architecture = Architecture.current();
diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/LegacyYamlRestCompatTestPluginFuncTest.groovy b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/LegacyYamlRestCompatTestPluginFuncTest.groovy
index e3efe3d7ffbf7..15b057a05e039 100644
--- a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/LegacyYamlRestCompatTestPluginFuncTest.groovy
+++ b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/LegacyYamlRestCompatTestPluginFuncTest.groovy
@@ -40,7 +40,7 @@ class LegacyYamlRestCompatTestPluginFuncTest extends AbstractRestResourcesFuncTe
         given:
         internalBuild()
 
-        subProject(":distribution:bwc:staged") << """
+        subProject(":distribution:bwc:minor") << """
         configurations { checkout }
         artifacts {
             checkout(new File(projectDir, "checkoutDir"))
@@ -61,11 +61,11 @@ class LegacyYamlRestCompatTestPluginFuncTest extends AbstractRestResourcesFuncTe
         result.task(transformTask).outcome == TaskOutcome.NO_SOURCE
     }
 
-    def "yamlRestCompatTest executes and copies api and transforms tests from :bwc:staged"() {
+    def "yamlRestCompatTest executes and copies api and transforms tests from :bwc:minor"() {
         given:
         internalBuild()
 
-        subProject(":distribution:bwc:staged") << """
+        subProject(":distribution:bwc:minor") << """
         configurations { checkout }
         artifacts {
             checkout(new File(projectDir, "checkoutDir"))
@@ -98,8 +98,8 @@ class LegacyYamlRestCompatTestPluginFuncTest extends AbstractRestResourcesFuncTe
         String api = "foo.json"
         String test = "10_basic.yml"
         //add the compatible test and api files, these are the prior version's normal yaml rest tests
-        file("distribution/bwc/staged/checkoutDir/rest-api-spec/src/main/resources/rest-api-spec/api/" + api) << ""
-        file("distribution/bwc/staged/checkoutDir/src/yamlRestTest/resources/rest-api-spec/test/" + test) << ""
+        file("distribution/bwc/minor/checkoutDir/rest-api-spec/src/main/resources/rest-api-spec/api/" + api) << ""
+        file("distribution/bwc/minor/checkoutDir/src/yamlRestTest/resources/rest-api-spec/test/" + test) << ""
 
         when:
         def result = gradleRunner("yamlRestCompatTest").build()
@@ -145,7 +145,7 @@ class LegacyYamlRestCompatTestPluginFuncTest extends AbstractRestResourcesFuncTe
         given:
         internalBuild()
         withVersionCatalogue()
-        subProject(":distribution:bwc:staged") << """
+        subProject(":distribution:bwc:minor") << """
         configurations { checkout }
         artifacts {
             checkout(new File(projectDir, "checkoutDir"))
@@ -186,7 +186,7 @@ class LegacyYamlRestCompatTestPluginFuncTest extends AbstractRestResourcesFuncTe
         given:
         internalBuild()
 
-        subProject(":distribution:bwc:staged") << """
+        subProject(":distribution:bwc:minor") << """
         configurations { checkout }
         artifacts {
             checkout(new File(projectDir, "checkoutDir"))
@@ -230,7 +230,7 @@ class LegacyYamlRestCompatTestPluginFuncTest extends AbstractRestResourcesFuncTe
 
         setupRestResources([], [])
 
-        file("distribution/bwc/staged/checkoutDir/src/yamlRestTest/resources/rest-api-spec/test/test.yml" ) << """
+        file("distribution/bwc/minor/checkoutDir/src/yamlRestTest/resources/rest-api-spec/test/test.yml" ) << """
         "one":
             - do:
                do_.some.key_to_replace:
diff --git a/build-tools-internal/src/integTest/resources/org/elasticsearch/gradle/internal/fake_git/remote/distribution/bwc/bugfix2/build.gradle b/build-tools-internal/src/integTest/resources/org/elasticsearch/gradle/internal/fake_git/remote/distribution/bwc/bugfix2/build.gradle
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/build-tools-internal/src/integTest/resources/org/elasticsearch/gradle/internal/fake_git/remote/distribution/bwc/maintenance/build.gradle b/build-tools-internal/src/integTest/resources/org/elasticsearch/gradle/internal/fake_git/remote/distribution/bwc/maintenance/build.gradle
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/build-tools-internal/src/integTest/resources/org/elasticsearch/gradle/internal/fake_git/remote/settings.gradle b/build-tools-internal/src/integTest/resources/org/elasticsearch/gradle/internal/fake_git/remote/settings.gradle
index 8c321294b585f..e931537fcd6e9 100644
--- a/build-tools-internal/src/integTest/resources/org/elasticsearch/gradle/internal/fake_git/remote/settings.gradle
+++ b/build-tools-internal/src/integTest/resources/org/elasticsearch/gradle/internal/fake_git/remote/settings.gradle
@@ -10,9 +10,11 @@
 rootProject.name = "root"
 
 include ":distribution:bwc:bugfix"
+include ":distribution:bwc:bugfix2"
 include ":distribution:bwc:minor"
 include ":distribution:bwc:major"
 include ":distribution:bwc:staged"
+include ":distribution:bwc:maintenance"
 include ":distribution:archives:darwin-tar"
 include ":distribution:archives:oss-darwin-tar"
 include ":distribution:archives:darwin-aarch64-tar"
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BwcVersions.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BwcVersions.java
index 93c2623a23d31..37b28389ad97b 100644
--- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BwcVersions.java
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BwcVersions.java
@@ -21,14 +21,15 @@
 import java.util.Optional;
 import java.util.Set;
 import java.util.TreeMap;
-import java.util.TreeSet;
 import java.util.function.BiConsumer;
 import java.util.function.Consumer;
 import java.util.function.Predicate;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
+import static java.util.Collections.reverseOrder;
 import static java.util.Collections.unmodifiableList;
+import static java.util.Comparator.comparing;
 
 /**
  * A container for elasticsearch supported version information used in BWC testing.
@@ -73,11 +74,11 @@ public class BwcVersions implements Serializable {
     private final transient List<Version> versions;
     private final Map<Version, UnreleasedVersionInfo> unreleased;
 
-    public BwcVersions(List<String> versionLines) {
-        this(versionLines, Version.fromString(VersionProperties.getElasticsearch()));
+    public BwcVersions(List<String> versionLines, List<String> developmentBranches) {
+        this(versionLines, Version.fromString(VersionProperties.getElasticsearch()), developmentBranches);
     }
 
-    public BwcVersions(Version currentVersionProperty, List<Version> allVersions) {
+    public BwcVersions(Version currentVersionProperty, List<Version> allVersions, List<String> developmentBranches) {
         if (allVersions.isEmpty()) {
             throw new IllegalArgumentException("Could not parse any versions");
         }
@@ -86,12 +87,12 @@ public BwcVersions(Version currentVersionProperty, List<Version> allVersions) {
         this.currentVersion = allVersions.get(allVersions.size() - 1);
         assertCurrentVersionMatchesParsed(currentVersionProperty);
 
-        this.unreleased = computeUnreleased();
+        this.unreleased = computeUnreleased(developmentBranches);
     }
 
     // Visible for testing
-    BwcVersions(List<String> versionLines, Version currentVersionProperty) {
-        this(currentVersionProperty, parseVersionLines(versionLines));
+    BwcVersions(List<String> versionLines, Version currentVersionProperty, List<String> developmentBranches) {
+        this(currentVersionProperty, parseVersionLines(versionLines), developmentBranches);
     }
 
     private static List<Version> parseVersionLines(List<String> versionLines) {
@@ -126,58 +127,77 @@ public void forPreviousUnreleased(Consumer<UnreleasedVersionInfo> consumer) {
         getUnreleased().stream().filter(version -> version.equals(currentVersion) == false).map(unreleased::get).forEach(consumer);
     }
 
-    private String getBranchFor(Version version) {
-        if (version.equals(currentVersion)) {
-            // Just assume the current branch is 'main'. It's actually not important, we never check out the current branch.
-            return "main";
-        } else {
+    private String getBranchFor(Version version, List<String> developmentBranches) {
+        // If the current version matches a specific feature freeze branch, use that
+        if (developmentBranches.contains(version.getMajor() + "." + version.getMinor())) {
             return version.getMajor() + "." + version.getMinor();
+        } else if (developmentBranches.contains(version.getMajor() + ".x")) { // Otherwise if an n.x branch exists and we are that major
+            return version.getMajor() + ".x";
+        } else { // otherwise we're the main branch
+            return "main";
         }
     }
 
-    private Map<Version, UnreleasedVersionInfo> computeUnreleased() {
-        Set<Version> unreleased = new TreeSet<>();
-        // The current version is being worked, is always unreleased
-        unreleased.add(currentVersion);
-        // Recurse for all unreleased versions starting from the current version
-        addUnreleased(unreleased, currentVersion, 0);
+    private Map<Version, UnreleasedVersionInfo> computeUnreleased(List<String> developmentBranches) {
+        Map<Version, UnreleasedVersionInfo> result = new TreeMap<>();
 
-        // Grab the latest version from the previous major if necessary as well, this is going to be a maintenance release
-        Version maintenance = versions.stream()
-            .filter(v -> v.getMajor() == currentVersion.getMajor() - 1)
-            .max(Comparator.naturalOrder())
-            .orElseThrow();
-        // This is considered the maintenance release only if we haven't yet encountered it
-        boolean hasMaintenanceRelease = unreleased.add(maintenance);
+        // The current version is always in development
+        String currentBranch = getBranchFor(currentVersion, developmentBranches);
+        result.put(currentVersion, new UnreleasedVersionInfo(currentVersion, currentBranch, ":distribution"));
+
+        // Check for an n.x branch as well
+        if (currentBranch.equals("main") && developmentBranches.stream().anyMatch(s -> s.endsWith(".x"))) {
+            // This should correspond to the latest new minor
+            Version version = versions.stream()
+                .sorted(Comparator.reverseOrder())
+                .filter(v -> v.getMajor() == (currentVersion.getMajor() - 1) && v.getRevision() == 0)
+                .findFirst()
+                .orElseThrow(() -> new IllegalStateException("Unable to determine development version for branch"));
+            String branch = getBranchFor(version, developmentBranches);
+            assert branch.equals(currentVersion.getMajor() - 1 + ".x") : "Expected branch does not match development branch";
+
+            result.put(version, new UnreleasedVersionInfo(version, branch, ":distribution:bwc:minor"));
+        }
 
-        List<Version> unreleasedList = unreleased.stream().sorted(Comparator.reverseOrder()).toList();
-        Map<Version, UnreleasedVersionInfo> result = new TreeMap<>();
-        boolean newMinor = false;
-        for (int i = 0; i < unreleasedList.size(); i++) {
-            Version esVersion = unreleasedList.get(i);
-            // This is either a new minor or staged release
-            if (currentVersion.equals(esVersion)) {
-                result.put(esVersion, new UnreleasedVersionInfo(esVersion, getBranchFor(esVersion), ":distribution"));
-            } else if (esVersion.getRevision() == 0) {
-                // If there are two upcoming unreleased minors then this one is the new minor
-                if (newMinor == false && unreleasedList.get(i + 1).getRevision() == 0) {
-                    result.put(esVersion, new UnreleasedVersionInfo(esVersion, esVersion.getMajor() + ".x", ":distribution:bwc:minor"));
-                    newMinor = true;
-                } else if (newMinor == false
-                    && unreleasedList.stream().filter(v -> v.getMajor() == esVersion.getMajor() && v.getRevision() == 0).count() == 1) {
-                    // This is the only unreleased new minor which means we've not yet staged it for release
-                    result.put(esVersion, new UnreleasedVersionInfo(esVersion, esVersion.getMajor() + ".x", ":distribution:bwc:minor"));
-                    newMinor = true;
-                } else {
-                    result.put(esVersion, new UnreleasedVersionInfo(esVersion, getBranchFor(esVersion), ":distribution:bwc:staged"));
-                }
-            } else {
-                // If this is the oldest unreleased version and we have a maintenance release
-                if (i == unreleasedList.size() - 1 && hasMaintenanceRelease) {
-                    result.put(esVersion, new UnreleasedVersionInfo(esVersion, getBranchFor(esVersion), ":distribution:bwc:maintenance"));
-                } else {
-                    result.put(esVersion, new UnreleasedVersionInfo(esVersion, getBranchFor(esVersion), ":distribution:bwc:bugfix"));
-                }
+        // Now handle all the feature freeze branches
+        List<String> featureFreezeBranches = developmentBranches.stream()
+            .filter(b -> Pattern.matches("[0-9]+\\.[0-9]+", b))
+            .sorted(reverseOrder(comparing(s -> Version.fromString(s, Version.Mode.RELAXED))))
+            .toList();
+
+        boolean existingBugfix = false;
+        for (int i = 0; i < featureFreezeBranches.size(); i++) {
+            String branch = featureFreezeBranches.get(i);
+            Version version = versions.stream()
+                .sorted(Comparator.reverseOrder())
+                .filter(v -> v.toString().startsWith(branch))
+                .findFirst()
+                .orElse(null);
+
+            // If we don't know about this version we can ignore it
+            if (version == null) {
+                continue;
+            }
+
+            // If this is the current version we can ignore as we've already handled it
+            if (version.equals(currentVersion)) {
+                continue;
+            }
+
+            // We only maintain compatibility back one major so ignore anything older
+            if (currentVersion.getMajor() - version.getMajor() > 1) {
+                continue;
+            }
+
+            // This is the maintenance version
+            if (i == featureFreezeBranches.size() - 1) {
+                result.put(version, new UnreleasedVersionInfo(version, branch, ":distribution:bwc:maintenance"));
+            } else if (version.getRevision() == 0) { // This is the next staged minor
+                result.put(version, new UnreleasedVersionInfo(version, branch, ":distribution:bwc:staged"));
+            } else { // This is a bugfix
+                String project = existingBugfix ? "bugfix2" : "bugfix";
+                result.put(version, new UnreleasedVersionInfo(version, branch, ":distribution:bwc:" + project));
+                existingBugfix = true;
             }
         }
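A note on the new `getBranchFor`: the resolution order is the crux of this change and is easy to misread in diff form. A version resolves to its own `major.minor` branch only while that branch is listed as in development; otherwise it falls through to the major's `.x` branch, and finally to `main`. The following self-contained sketch reproduces that decision table; it uses plain ints instead of the build's `Version` type, and the branch list is illustrative only:

```java
import java.util.List;

// Hedged sketch of the branch-resolution order in the new BwcVersions#getBranchFor.
public class BranchResolution {
    static String branchFor(int major, int minor, List<String> developmentBranches) {
        if (developmentBranches.contains(major + "." + minor)) {
            return major + "." + minor;      // a feature-freeze branch such as "8.16"
        } else if (developmentBranches.contains(major + ".x")) {
            return major + ".x";             // the open minor line for that major
        } else {
            return "main";                   // everything else is current development
        }
    }

    public static void main(String[] args) {
        List<String> branches = List.of("main", "8.x", "8.16", "8.15", "7.17");
        System.out.println(branchFor(8, 16, branches)); // 8.16
        System.out.println(branchFor(8, 18, branches)); // 8.x
        System.out.println(branchFor(9, 0, branches));  // main
    }
}
```

Running it prints `8.16`, `8.x`, `main`, matching the three arms of the new method.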
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/DockerBase.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/DockerBase.java
index 0fb75b59b6096..0d7bcea168df8 100644
--- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/DockerBase.java
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/DockerBase.java
@@ -13,8 +13,8 @@
  * This class models the different Docker base images that are used to build Docker distributions of Elasticsearch.
  */
 public enum DockerBase {
-    // "latest" here is intentional, since the image name specifies "8"
-    DEFAULT("docker.elastic.co/ubi8/ubi-minimal:latest", "", "microdnf"),
+    // "latest" here is intentional, since the image name specifies "9"
+    DEFAULT("docker.elastic.co/ubi9/ubi-minimal:latest", "", "microdnf"),
 
     // The Iron Bank base image is UBI (albeit hardened), but we are required to parameterize the Docker build
     IRON_BANK("${BASE_REGISTRY}/${BASE_IMAGE}:${BASE_TAG}", "-ironbank", "yum"),
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchTestBasePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchTestBasePlugin.java
index 4446952fec2bb..720d6a7c2efb6 100644
--- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchTestBasePlugin.java
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchTestBasePlugin.java
@@ -20,6 +20,7 @@
 import org.elasticsearch.gradle.test.GradleTestPolicySetupPlugin;
 import org.elasticsearch.gradle.test.SystemPropertyCommandLineArgumentProvider;
 import org.gradle.api.Action;
+import org.gradle.api.JavaVersion;
 import org.gradle.api.Plugin;
 import org.gradle.api.Project;
 import org.gradle.api.Task;
@@ -112,7 +113,6 @@ public void execute(Task t) {
                 test.jvmArgs(
                     "-Xmx" + System.getProperty("tests.heap.size", "512m"),
                     "-Xms" + System.getProperty("tests.heap.size", "512m"),
-                    "-Djava.security.manager=allow",
                     "-Dtests.testfeatures.enabled=true",
                     "--add-opens=java.base/java.util=ALL-UNNAMED",
                     // TODO: only open these for mockito when it is modularized
@@ -127,6 +127,13 @@ public void execute(Task t) {
                 );
 
                 test.getJvmArgumentProviders().add(new SimpleCommandLineArgumentProvider("-XX:HeapDumpPath=" + heapdumpDir));
+                test.getJvmArgumentProviders().add(() -> {
+                    if (test.getJavaVersion().compareTo(JavaVersion.VERSION_23) <= 0) {
+                        return List.of("-Djava.security.manager=allow");
+                    } else {
+                        return List.of();
+                    }
+                });
 
                 String argline = System.getProperty("tests.jvm.argline");
                 if (argline != null) {
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/GlobalBuildInfoPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/GlobalBuildInfoPlugin.java
index 0535026b2594e..27d2a66feb206 100644
--- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/GlobalBuildInfoPlugin.java
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/GlobalBuildInfoPlugin.java
@@ -8,6 +8,9 @@
  */
 package org.elasticsearch.gradle.internal.info;
 
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.ObjectMapper;
+
 import org.apache.commons.io.IOUtils;
 import org.elasticsearch.gradle.VersionProperties;
 import org.elasticsearch.gradle.internal.BwcVersions;
@@ -44,11 +47,13 @@
 import java.io.File;
 import java.io.FileInputStream;
 import java.io.IOException;
+import java.io.InputStream;
 import java.io.InputStreamReader;
 import java.io.UncheckedIOException;
 import java.nio.file.Files;
 import java.time.ZoneOffset;
 import java.time.ZonedDateTime;
+import java.util.ArrayList;
 import java.util.List;
 import java.util.Locale;
 import java.util.Random;
@@ -68,6 +73,7 @@ public class GlobalBuildInfoPlugin implements Plugin<Project> {
     private final JavaInstallationRegistry javaInstallationRegistry;
     private final JvmMetadataDetector metadataDetector;
     private final ProviderFactory providers;
+    private final ObjectMapper objectMapper;
     private JavaToolchainService toolChainService;
     private Project project;
 
@@ -82,7 +88,7 @@ public GlobalBuildInfoPlugin(
         this.javaInstallationRegistry = javaInstallationRegistry;
         this.metadataDetector = new ErrorTraceMetadataDetector(metadataDetector);
         this.providers = providers;
-
+        this.objectMapper = new ObjectMapper();
     }
 
     @Override
@@ -190,12 +196,27 @@ private BwcVersions resolveBwcVersions() {
         );
         try (var is = new FileInputStream(versionsFilePath)) {
             List<String> versionLines = IOUtils.readLines(is, "UTF-8");
-            return new BwcVersions(versionLines);
+            return new BwcVersions(versionLines, getDevelopmentBranches());
         } catch (IOException e) {
             throw new IllegalStateException("Unable to resolve to resolve bwc versions from versionsFile.", e);
         }
     }
 
+    private List<String> getDevelopmentBranches() {
+        List<String> branches = new ArrayList<>();
+        File branchesFile = new File(Util.locateElasticsearchWorkspace(project.getGradle()), "branches.json");
+        try (InputStream is = new FileInputStream(branchesFile)) {
+            JsonNode json = objectMapper.readTree(is);
+            for (JsonNode node : json.get("branches")) {
+                branches.add(node.get("branch").asText());
+            }
+        } catch (IOException e) {
+            throw new UncheckedIOException(e);
+        }
+
+        return branches;
+    }
+
     private void logGlobalBuildInfo(BuildParameterExtension buildParams) {
         final String osName = System.getProperty("os.name");
         final String osVersion = System.getProperty("os.version");
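The new `getDevelopmentBranches()` reads a `branches.json` at the workspace root, but the diff only shows the consuming side. Based solely on the fields the code reads, the file is assumed to contain a `branches` array of objects with a `branch` key; any sibling fields would be ignored by this traversal. A runnable sketch of the same Jackson walk over illustrative sample content:

```java
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;

import java.util.ArrayList;
import java.util.List;

// Hedged sketch: the branches.json shape that getDevelopmentBranches() appears to expect.
public class BranchesJsonDemo {
    public static void main(String[] args) throws Exception {
        String sample = """
            {
              "branches": [
                { "branch": "main" },
                { "branch": "8.x" },
                { "branch": "8.16" },
                { "branch": "8.15" },
                { "branch": "7.17" }
              ]
            }
            """;
        JsonNode json = new ObjectMapper().readTree(sample);
        List<String> branches = new ArrayList<>();
        for (JsonNode node : json.get("branches")) {
            branches.add(node.get("branch").asText());
        }
        System.out.println(branches); // [main, 8.x, 8.16, 8.15, 7.17]
    }
}
```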
diff --git a/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/BwcVersionsSpec.groovy b/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/BwcVersionsSpec.groovy
index 9c7d20d84a670..4d033564a42b4 100644
--- a/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/BwcVersionsSpec.groovy
+++ b/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/BwcVersionsSpec.groovy
@@ -17,8 +17,9 @@ import org.elasticsearch.gradle.internal.BwcVersions.UnreleasedVersionInfo
 class BwcVersionsSpec extends Specification {
     List<String> versionLines = []
 
-    def "current version is next minor with next major and last minor both staged"() {
+    def "current version is next major"() {
         given:
+        addVersion('7.17.10', '8.9.0')
         addVersion('8.14.0', '9.9.0')
         addVersion('8.14.1', '9.9.0')
         addVersion('8.14.2', '9.9.0')
@@ -29,25 +30,25 @@ class BwcVersionsSpec extends Specification {
         addVersion('8.16.1', '9.10.0')
         addVersion('8.17.0', '9.10.0')
         addVersion('9.0.0', '10.0.0')
-        addVersion('9.1.0', '10.1.0')
 
         when:
-        def bwc = new BwcVersions(versionLines, v('9.1.0'))
+        def bwc = new BwcVersions(versionLines, v('9.0.0'), ['main', '8.x', '8.16', '8.15', '7.17'])
        def unreleased = bwc.unreleased.collectEntries { [it, bwc.unreleasedInfo(it)] }
 
         then:
         unreleased == [
+            (v('8.15.2')): new UnreleasedVersionInfo(v('8.15.2'), '8.15', ':distribution:bwc:bugfix2'),
             (v('8.16.1')): new UnreleasedVersionInfo(v('8.16.1'), '8.16', ':distribution:bwc:bugfix'),
-            (v('8.17.0')): new UnreleasedVersionInfo(v('8.17.0'), '8.17', ':distribution:bwc:staged'),
-            (v('9.0.0')): new UnreleasedVersionInfo(v('9.0.0'), '9.x', ':distribution:bwc:minor'),
-            (v('9.1.0')): new UnreleasedVersionInfo(v('9.1.0'), 'main', ':distribution')
+            (v('8.17.0')): new UnreleasedVersionInfo(v('8.17.0'), '8.x', ':distribution:bwc:minor'),
+            (v('9.0.0')): new UnreleasedVersionInfo(v('9.0.0'), 'main', ':distribution'),
         ]
-        bwc.wireCompatible == [v('8.17.0'), v('9.0.0'), v('9.1.0')]
-        bwc.indexCompatible == [v('8.14.0'), v('8.14.1'), v('8.14.2'), v('8.15.0'), v('8.15.1'), v('8.15.2'), v('8.16.0'), v('8.16.1'), v('8.17.0'), v('9.0.0'), v('9.1.0')]
+        bwc.wireCompatible == [v('8.17.0'), v('9.0.0')]
+        bwc.indexCompatible == [v('8.14.0'), v('8.14.1'), v('8.14.2'), v('8.15.0'), v('8.15.1'), v('8.15.2'), v('8.16.0'), v('8.16.1'), v('8.17.0'), v('9.0.0')]
     }
 
-    def "current is next minor with upcoming minor staged"() {
+    def "current version is next major with staged minor"() {
         given:
+        addVersion('7.17.10', '8.9.0')
         addVersion('8.14.0', '9.9.0')
         addVersion('8.14.1', '9.9.0')
         addVersion('8.14.2', '9.9.0')
@@ -57,53 +58,106 @@ class BwcVersionsSpec extends Specification {
         addVersion('8.16.0', '9.10.0')
         addVersion('8.16.1', '9.10.0')
         addVersion('8.17.0', '9.10.0')
-        addVersion('8.17.1', '9.10.0')
+        addVersion('8.18.0', '9.10.0')
         addVersion('9.0.0', '10.0.0')
-        addVersion('9.1.0', '10.1.0')
 
         when:
-        def bwc = new BwcVersions(versionLines, v('9.1.0'))
+        def bwc = new BwcVersions(versionLines, v('9.0.0'), ['main', '8.x', '8.17', '8.16', '8.15', '7.17'])
         def unreleased = bwc.unreleased.collectEntries { [it, bwc.unreleasedInfo(it)] }
 
         then:
         unreleased == [
-            (v('8.17.1')): new UnreleasedVersionInfo(v('8.17.1'), '8.17', ':distribution:bwc:bugfix'),
+            (v('8.15.2')): new UnreleasedVersionInfo(v('8.15.2'), '8.15', ':distribution:bwc:bugfix2'),
+            (v('8.16.1')): new UnreleasedVersionInfo(v('8.16.1'), '8.16', ':distribution:bwc:bugfix'),
+            (v('8.17.0')): new UnreleasedVersionInfo(v('8.17.0'), '8.17', ':distribution:bwc:staged'),
+            (v('8.18.0')): new UnreleasedVersionInfo(v('8.18.0'), '8.x', ':distribution:bwc:minor'),
+            (v('9.0.0')): new UnreleasedVersionInfo(v('9.0.0'), 'main', ':distribution'),
+        ]
+        bwc.wireCompatible == [v('8.18.0'), v('9.0.0')]
+        bwc.indexCompatible == [v('8.14.0'), v('8.14.1'), v('8.14.2'), v('8.15.0'), v('8.15.1'), v('8.15.2'), v('8.16.0'), v('8.16.1'), v('8.17.0'), v('8.18.0'), v('9.0.0')]
+    }
+
+    def "current version is first new minor in major series"() {
+        given:
+        addVersion('7.17.10', '8.9.0')
+        addVersion('8.16.0', '9.10.0')
+        addVersion('8.16.1', '9.10.0')
+        addVersion('8.17.0', '9.10.0')
+        addVersion('8.18.0', '9.10.0')
+        addVersion('9.0.0', '10.0.0')
+        addVersion('9.1.0', '10.0.0')
+
+        when:
+        def bwc = new BwcVersions(versionLines, v('9.1.0'), ['main', '9.0', '8.18'])
+        def unreleased = bwc.unreleased.collectEntries { [it, bwc.unreleasedInfo(it)] }
+
+        then:
+        unreleased == [
+            (v('8.18.0')): new UnreleasedVersionInfo(v('8.18.0'), '8.18', ':distribution:bwc:maintenance'),
             (v('9.0.0')): new UnreleasedVersionInfo(v('9.0.0'), '9.0', ':distribution:bwc:staged'),
-            (v('9.1.0')): new UnreleasedVersionInfo(v('9.1.0'), 'main', ':distribution')
+            (v('9.1.0')): new UnreleasedVersionInfo(v('9.1.0'), 'main', ':distribution'),
         ]
-        bwc.wireCompatible == [v('8.17.0'), v('8.17.1'), v('9.0.0'), v('9.1.0')]
-        bwc.indexCompatible == [v('8.14.0'), v('8.14.1'), v('8.14.2'), v('8.15.0'), v('8.15.1'), v('8.15.2'), v('8.16.0'), v('8.16.1'), v('8.17.0'), v('8.17.1'), v('9.0.0'), v('9.1.0')]
+        bwc.wireCompatible == [v('8.18.0'), v('9.0.0'), v('9.1.0')]
+        bwc.indexCompatible == [v('8.16.0'), v('8.16.1'), v('8.17.0'), v('8.18.0'), v('9.0.0'), v('9.1.0')]
     }
 
-    def "current version is staged major"() {
+    def "current version is new minor with single bugfix"() {
         given:
-        addVersion('8.14.0', '9.9.0')
-        addVersion('8.14.1', '9.9.0')
-        addVersion('8.14.2', '9.9.0')
-        addVersion('8.15.0', '9.9.0')
-        addVersion('8.15.1', '9.9.0')
-        addVersion('8.15.2', '9.9.0')
+        addVersion('7.17.10', '8.9.0')
         addVersion('8.16.0', '9.10.0')
         addVersion('8.16.1', '9.10.0')
         addVersion('8.17.0', '9.10.0')
-        addVersion('8.17.1', '9.10.0')
+        addVersion('8.18.0', '9.10.0')
         addVersion('9.0.0', '10.0.0')
+        addVersion('9.0.1', '10.0.0')
+        addVersion('9.1.0', '10.0.0')
 
         when:
-        def bwc = new BwcVersions(versionLines, v('9.0.0'))
+        def bwc = new BwcVersions(versionLines, v('9.1.0'), ['main', '9.0', '8.18'])
         def unreleased = bwc.unreleased.collectEntries { [it, bwc.unreleasedInfo(it)] }
 
         then:
         unreleased == [
-            (v('8.17.1')): new UnreleasedVersionInfo(v('8.17.1'), '8.17', ':distribution:bwc:bugfix'),
-            (v('9.0.0')): new UnreleasedVersionInfo(v('9.0.0'), 'main', ':distribution'),
+            (v('8.18.0')): new UnreleasedVersionInfo(v('8.18.0'), '8.18', ':distribution:bwc:maintenance'),
+            (v('9.0.1')): new UnreleasedVersionInfo(v('9.0.1'), '9.0', ':distribution:bwc:bugfix'),
+            (v('9.1.0')): new UnreleasedVersionInfo(v('9.1.0'), 'main', ':distribution'),
         ]
-        bwc.wireCompatible == [v('8.17.0'), v('8.17.1'), v('9.0.0')]
-        bwc.indexCompatible == [v('8.14.0'), v('8.14.1'), v('8.14.2'), v('8.15.0'), v('8.15.1'), v('8.15.2'), v('8.16.0'), v('8.16.1'), v('8.17.0'), v('8.17.1'), v('9.0.0')]
+        bwc.wireCompatible == [v('8.18.0'), v('9.0.0'), v('9.0.1'), v('9.1.0')]
+        bwc.indexCompatible == [v('8.16.0'), v('8.16.1'), v('8.17.0'), v('8.18.0'), v('9.0.0'), v('9.0.1'), v('9.1.0')]
+    }
+
+    def "current version is new minor with single bugfix and staged minor"() {
+        given:
+        addVersion('7.17.10', '8.9.0')
+        addVersion('8.16.0', '9.10.0')
+        addVersion('8.16.1', '9.10.0')
+        addVersion('8.17.0', '9.10.0')
+        addVersion('8.18.0', '9.10.0')
+        addVersion('9.0.0', '10.0.0')
+        addVersion('9.0.1', '10.0.0')
+        addVersion('9.1.0', '10.0.0')
+        addVersion('9.2.0', '10.0.0')
+
+        when:
+        def bwc = new BwcVersions(versionLines, v('9.2.0'), ['main', '9.1', '9.0', '8.18'])
+        def unreleased = bwc.unreleased.collectEntries { [it, bwc.unreleasedInfo(it)] }
+
+        then:
+        unreleased == [
+            (v('8.18.0')): new UnreleasedVersionInfo(v('8.18.0'), '8.18', ':distribution:bwc:maintenance'),
+            (v('9.0.1')): new UnreleasedVersionInfo(v('9.0.1'), '9.0', ':distribution:bwc:bugfix'),
+            (v('9.1.0')): new UnreleasedVersionInfo(v('9.1.0'), '9.1', ':distribution:bwc:staged'),
+            (v('9.2.0')): new UnreleasedVersionInfo(v('9.2.0'), 'main', ':distribution'),
+        ]
+        bwc.wireCompatible == [v('8.18.0'), v('9.0.0'), v('9.0.1'), v('9.1.0'), v('9.2.0')]
+        bwc.indexCompatible == [v('8.16.0'), v('8.16.1'), v('8.17.0'), v('8.18.0'), v('9.0.0'), v('9.0.1'), v('9.1.0'), v('9.2.0')]
     }
 
-    def "current version is major with unreleased next minor"() {
+    def "current version is next minor"() {
         given:
+        addVersion('7.16.3', '8.9.0')
+        addVersion('7.17.0', '8.9.0')
+        addVersion('7.17.1', '8.9.0')
         addVersion('8.14.0', '9.9.0')
         addVersion('8.14.1', '9.9.0')
         addVersion('8.14.2', '9.9.0')
@@ -113,24 +167,29 @@ class BwcVersionsSpec extends Specification {
         addVersion('8.16.0', '9.10.0')
         addVersion('8.16.1', '9.10.0')
         addVersion('8.17.0', '9.10.0')
-        addVersion('9.0.0', '10.0.0')
+        addVersion('8.17.1', '9.10.0')
+        addVersion('8.18.0', '9.10.0')
 
         when:
-        def bwc = new BwcVersions(versionLines, v('9.0.0'))
+        def bwc = new BwcVersions(versionLines, v('8.18.0'), ['main', '8.x', '8.17', '8.16', '7.17'])
         def unreleased = bwc.unreleased.collectEntries { [it, bwc.unreleasedInfo(it)] }
 
         then:
         unreleased == [
-            (v('8.16.1')): new UnreleasedVersionInfo(v('8.16.1'), '8.16', ':distribution:bwc:bugfix'),
-            (v('8.17.0')): new UnreleasedVersionInfo(v('8.17.0'), '8.x', ':distribution:bwc:minor'),
-            (v('9.0.0')): new UnreleasedVersionInfo(v('9.0.0'), 'main', ':distribution'),
+            (v('7.17.1')): new UnreleasedVersionInfo(v('7.17.1'), '7.17', ':distribution:bwc:maintenance'),
+            (v('8.16.1')): new UnreleasedVersionInfo(v('8.16.1'), '8.16', ':distribution:bwc:bugfix2'),
+            (v('8.17.1')): new UnreleasedVersionInfo(v('8.17.1'), '8.17', ':distribution:bwc:bugfix'),
+            (v('8.18.0')): new UnreleasedVersionInfo(v('8.18.0'), '8.x', ':distribution'),
         ]
-        bwc.wireCompatible == [v('8.17.0'), v('9.0.0')]
-        bwc.indexCompatible == [v('8.14.0'), v('8.14.1'), v('8.14.2'), v('8.15.0'), v('8.15.1'), v('8.15.2'), v('8.16.0'), v('8.16.1'), v('8.17.0'), v('9.0.0')]
+        bwc.wireCompatible == [v('7.17.0'), v('7.17.1'), v('8.14.0'), v('8.14.1'), v('8.14.2'), v('8.15.0'), v('8.15.1'), v('8.15.2'), v('8.16.0'), v('8.16.1'), v('8.17.0'), v('8.17.1'), v('8.18.0')]
+        bwc.indexCompatible == [v('7.16.3'), v('7.17.0'), v('7.17.1'), v('8.14.0'), v('8.14.1'), v('8.14.2'), v('8.15.0'), v('8.15.1'), v('8.15.2'), v('8.16.0'), v('8.16.1'), v('8.17.0'), v('8.17.1'), v('8.18.0')]
     }
 
-    def "current version is major with staged next minor"() {
+    def "current version is new minor with staged minor"() {
         given:
+        addVersion('7.16.3', '8.9.0')
+        addVersion('7.17.0', '8.9.0')
+        addVersion('7.17.1', '8.9.0')
         addVersion('8.14.0', '9.9.0')
         addVersion('8.14.1', '9.9.0')
         addVersion('8.14.2', '9.9.0')
@@ -138,26 +197,31 @@ class BwcVersionsSpec extends Specification {
         addVersion('8.15.1', '9.9.0')
         addVersion('8.15.2', '9.9.0')
         addVersion('8.16.0', '9.10.0')
+        addVersion('8.16.1', '9.10.0')
         addVersion('8.17.0', '9.10.0')
-        addVersion('9.0.0', '10.0.0')
+        addVersion('8.18.0', '9.10.0')
 
         when:
-        def bwc = new BwcVersions(versionLines, v('9.0.0'))
+        def bwc = new BwcVersions(versionLines, v('8.18.0'), ['main', '8.x', '8.17', '8.16', '8.15', '7.17'])
         def unreleased = bwc.unreleased.collectEntries { [it, bwc.unreleasedInfo(it)] }
 
         then:
         unreleased == [
-            (v('8.15.2')): new UnreleasedVersionInfo(v('8.15.2'), '8.15', ':distribution:bwc:bugfix'),
-            (v('8.16.0')): new UnreleasedVersionInfo(v('8.16.0'), '8.16', ':distribution:bwc:staged'),
-            (v('8.17.0')): new UnreleasedVersionInfo(v('8.17.0'), '8.x', ':distribution:bwc:minor'),
-            (v('9.0.0')): new UnreleasedVersionInfo(v('9.0.0'), 'main', ':distribution'),
+            (v('7.17.1')): new UnreleasedVersionInfo(v('7.17.1'), '7.17', ':distribution:bwc:maintenance'),
+            (v('8.15.2')): new UnreleasedVersionInfo(v('8.15.2'), '8.15', ':distribution:bwc:bugfix2'),
+            (v('8.16.1')): new UnreleasedVersionInfo(v('8.16.1'), '8.16', ':distribution:bwc:bugfix'),
+            (v('8.17.0')): new UnreleasedVersionInfo(v('8.17.0'), '8.17', ':distribution:bwc:staged'),
+            (v('8.18.0')): new UnreleasedVersionInfo(v('8.18.0'), '8.x', ':distribution'),
         ]
-        bwc.wireCompatible == [v('8.17.0'), v('9.0.0')]
-        bwc.indexCompatible == [v('8.14.0'), v('8.14.1'), v('8.14.2'), v('8.15.0'), v('8.15.1'), v('8.15.2'), v('8.16.0'), v('8.17.0'), v('9.0.0')]
+        bwc.wireCompatible == [v('7.17.0'), v('7.17.1'), v('8.14.0'), v('8.14.1'), v('8.14.2'), v('8.15.0'), v('8.15.1'), v('8.15.2'), v('8.16.0'), v('8.16.1'), v('8.17.0'), v('8.18.0')]
+        bwc.indexCompatible == [v('7.16.3'), v('7.17.0'), v('7.17.1'), v('8.14.0'), v('8.14.1'), v('8.14.2'), v('8.15.0'), v('8.15.1'), v('8.15.2'), v('8.16.0'), v('8.16.1'), v('8.17.0'), v('8.18.0')]
     }
 
-    def "current version is next bugfix"() {
+    def "current version is first bugfix"() {
         given:
+        addVersion('7.16.3', '8.9.0')
+        addVersion('7.17.0', '8.9.0')
+        addVersion('7.17.1', '8.9.0')
         addVersion('8.14.0', '9.9.0')
         addVersion('8.14.1', '9.9.0')
         addVersion('8.14.2', '9.9.0')
@@ -166,52 +230,44 @@ class BwcVersionsSpec extends Specification {
         addVersion('8.15.2', '9.9.0')
         addVersion('8.16.0', '9.10.0')
         addVersion('8.16.1', '9.10.0')
-        addVersion('8.17.0', '9.10.0')
-        addVersion('8.17.1', '9.10.0')
-        addVersion('9.0.0', '10.0.0')
-        addVersion('9.0.1', '10.0.0')
 
         when:
-        def bwc = new BwcVersions(versionLines, v('9.0.1'))
+        def bwc = new BwcVersions(versionLines, v('8.16.1'), ['main', '8.x', '8.17', '8.16', '8.15', '7.17'])
         def unreleased = bwc.unreleased.collectEntries { [it, bwc.unreleasedInfo(it)] }
 
         then:
         unreleased == [
-            (v('8.17.1')): new UnreleasedVersionInfo(v('8.17.1'), '8.17', ':distribution:bwc:maintenance'),
-            (v('9.0.1')): new UnreleasedVersionInfo(v('9.0.1'), 'main', ':distribution'),
+            (v('7.17.1')): new UnreleasedVersionInfo(v('7.17.1'), '7.17', ':distribution:bwc:maintenance'),
+            (v('8.15.2')): new UnreleasedVersionInfo(v('8.15.2'), '8.15', ':distribution:bwc:bugfix'),
+            (v('8.16.1')): new UnreleasedVersionInfo(v('8.16.1'), '8.16', ':distribution'),
        ]
-        bwc.wireCompatible == [v('8.17.0'), v('8.17.1'), v('9.0.0'), v('9.0.1')]
-        bwc.indexCompatible == [v('8.14.0'), v('8.14.1'), v('8.14.2'), v('8.15.0'), v('8.15.1'), v('8.15.2'), v('8.16.0'), v('8.16.1'), v('8.17.0'), v('8.17.1'), v('9.0.0'), v('9.0.1')]
+        bwc.wireCompatible == [v('7.17.0'), v('7.17.1'), v('8.14.0'), v('8.14.1'), v('8.14.2'), v('8.15.0'), v('8.15.1'), v('8.15.2'), v('8.16.0'), v('8.16.1')]
+        bwc.indexCompatible == [v('7.16.3'), v('7.17.0'), v('7.17.1'), v('8.14.0'), v('8.14.1'), v('8.14.2'), v('8.15.0'), v('8.15.1'), v('8.15.2'), v('8.16.0'), v('8.16.1')]
     }
 
-    def "current version is next minor with no staged releases"() {
+    def "current version is second bugfix"() {
         given:
+        addVersion('7.16.3', '8.9.0')
+        addVersion('7.17.0', '8.9.0')
+        addVersion('7.17.1', '8.9.0')
         addVersion('8.14.0', '9.9.0')
         addVersion('8.14.1', '9.9.0')
         addVersion('8.14.2', '9.9.0')
         addVersion('8.15.0', '9.9.0')
         addVersion('8.15.1', '9.9.0')
         addVersion('8.15.2', '9.9.0')
-        addVersion('8.16.0', '9.10.0')
-        addVersion('8.16.1', '9.10.0')
-        addVersion('8.17.0', '9.10.0')
-        addVersion('8.17.1', '9.10.0')
-        addVersion('9.0.0', '10.0.0')
-        addVersion('9.0.1', '10.0.0')
-        addVersion('9.1.0', '10.1.0')
 
         when:
-        def bwc = new BwcVersions(versionLines, v('9.1.0'))
+        def bwc = new BwcVersions(versionLines, v('8.15.2'), ['main', '8.x', '8.17', '8.16', '8.15', '7.17'])
         def unreleased = bwc.unreleased.collectEntries { [it, bwc.unreleasedInfo(it)] }
 
         then:
         unreleased == [
-            (v('8.17.1')): new UnreleasedVersionInfo(v('8.17.1'), '8.17', ':distribution:bwc:maintenance'),
-            (v('9.0.1')): new UnreleasedVersionInfo(v('9.0.1'), '9.0', ':distribution:bwc:bugfix'),
-            (v('9.1.0')): new UnreleasedVersionInfo(v('9.1.0'), 'main', ':distribution')
+            (v('7.17.1')): new UnreleasedVersionInfo(v('7.17.1'), '7.17', ':distribution:bwc:maintenance'),
+            (v('8.15.2')): new UnreleasedVersionInfo(v('8.15.2'), '8.15', ':distribution'),
         ]
-        bwc.wireCompatible == [v('8.17.0'), v('8.17.1'), v('9.0.0'), v('9.0.1'), v('9.1.0')]
-        bwc.indexCompatible == [v('8.14.0'), v('8.14.1'), v('8.14.2'), v('8.15.0'), v('8.15.1'), v('8.15.2'), v('8.16.0'), v('8.16.1'), v('8.17.0'), v('8.17.1'), v('9.0.0'), v('9.0.1'), v('9.1.0')]
+        bwc.wireCompatible == [v('7.17.0'), v('7.17.1'), v('8.14.0'), v('8.14.1'), v('8.14.2'), v('8.15.0'), v('8.15.1'), v('8.15.2')]
+        bwc.indexCompatible == [v('7.16.3'), v('7.17.0'), v('7.17.1'), v('8.14.0'), v('8.14.1'), v('8.14.2'), v('8.15.0'), v('8.15.1'), v('8.15.2')]
     }
 
     private void addVersion(String elasticsearch, String lucene) {
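Across these spec rewrites, one invariant drives the expectations: the numeric feature-freeze branches are ordered newest-first, the last one becomes the maintenance line, earlier `x.y.0` entries are staged, and patch revisions map to `bugfix`/`bugfix2`. A self-contained sketch of just the ordering step, with a numeric `(major, minor)` key standing in for the build's `Version.fromString(..., Mode.RELAXED)` and an illustrative branch list:

```java
import java.util.Comparator;
import java.util.List;
import java.util.regex.Pattern;

// Hedged sketch of the feature-freeze ordering behind the spec expectations.
public class FreezeBranchOrder {
    public static void main(String[] args) {
        List<String> developmentBranches = List.of("main", "8.x", "8.16", "8.15", "7.17");
        List<String> featureFreezeBranches = developmentBranches.stream()
            .filter(b -> Pattern.matches("[0-9]+\\.[0-9]+", b))
            .sorted(Comparator.comparingInt(FreezeBranchOrder::major)
                .thenComparingInt(FreezeBranchOrder::minor)
                .reversed())
            .toList();
        // Newest first; the trailing entry ("7.17") maps to :distribution:bwc:maintenance.
        System.out.println(featureFreezeBranches); // [8.16, 8.15, 7.17]
    }

    private static int major(String branch) {
        return Integer.parseInt(branch.split("\\.")[0]);
    }

    private static int minor(String branch) {
        return Integer.parseInt(branch.split("\\.")[1]);
    }
}
```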
diff --git a/build-tools-internal/src/test/java/org/elasticsearch/gradle/AbstractDistributionDownloadPluginTests.java b/build-tools-internal/src/test/java/org/elasticsearch/gradle/AbstractDistributionDownloadPluginTests.java
index 639dec280ae9a..7512fa20814c6 100644
--- a/build-tools-internal/src/test/java/org/elasticsearch/gradle/AbstractDistributionDownloadPluginTests.java
+++ b/build-tools-internal/src/test/java/org/elasticsearch/gradle/AbstractDistributionDownloadPluginTests.java
@@ -16,6 +16,7 @@
 
 import java.io.File;
 import java.util.Arrays;
+import java.util.List;
 
 public class AbstractDistributionDownloadPluginTests {
     protected static Project rootProject;
@@ -28,22 +29,27 @@ public class AbstractDistributionDownloadPluginTests {
     protected static final Version BWC_STAGED_VERSION = Version.fromString("1.0.0");
     protected static final Version BWC_BUGFIX_VERSION = Version.fromString("1.0.1");
     protected static final Version BWC_MAINTENANCE_VERSION = Version.fromString("0.90.1");
+    protected static final List<String> DEVELOPMENT_BRANCHES = Arrays.asList("main", "1.1", "1.0", "0.90");
 
     protected static final BwcVersions BWC_MINOR = new BwcVersions(
         BWC_MAJOR_VERSION,
-        Arrays.asList(BWC_BUGFIX_VERSION, BWC_MINOR_VERSION, BWC_MAJOR_VERSION)
+        Arrays.asList(BWC_BUGFIX_VERSION, BWC_MINOR_VERSION, BWC_MAJOR_VERSION),
+        DEVELOPMENT_BRANCHES
     );
     protected static final BwcVersions BWC_STAGED = new BwcVersions(
         BWC_MAJOR_VERSION,
-        Arrays.asList(BWC_MAINTENANCE_VERSION, BWC_STAGED_VERSION, BWC_MINOR_VERSION, BWC_MAJOR_VERSION)
+        Arrays.asList(BWC_MAINTENANCE_VERSION, BWC_STAGED_VERSION, BWC_MINOR_VERSION, BWC_MAJOR_VERSION),
+        DEVELOPMENT_BRANCHES
     );
     protected static final BwcVersions BWC_BUGFIX = new BwcVersions(
         BWC_MAJOR_VERSION,
-        Arrays.asList(BWC_BUGFIX_VERSION, BWC_MINOR_VERSION, BWC_MAJOR_VERSION)
+        Arrays.asList(BWC_BUGFIX_VERSION, BWC_MINOR_VERSION, BWC_MAJOR_VERSION),
+        DEVELOPMENT_BRANCHES
     );
     protected static final BwcVersions BWC_MAINTENANCE = new BwcVersions(
         BWC_MINOR_VERSION,
-        Arrays.asList(BWC_MAINTENANCE_VERSION, BWC_BUGFIX_VERSION, BWC_MINOR_VERSION)
+        Arrays.asList(BWC_MAINTENANCE_VERSION, BWC_BUGFIX_VERSION, BWC_MINOR_VERSION),
+        DEVELOPMENT_BRANCHES
     );
 
     protected static String projectName(String base, boolean bundledJdk) {
diff --git a/build-tools-internal/version.properties b/build-tools-internal/version.properties
index 29c5bc16a8c4a..ede1b392b8a41 100644
--- a/build-tools-internal/version.properties
+++ b/build-tools-internal/version.properties
@@ -17,6 +17,8 @@ jna = 5.12.1
 netty = 4.1.115.Final
 commons_lang3 = 3.9
 google_oauth_client = 1.34.1
+awsv1sdk = 1.12.270
+awsv2sdk = 2.28.13
 antlr4 = 4.13.1
 
 # bouncy castle version for non-fips. fips jars use a different version
@@ -33,7 +35,7 @@ commonscodec = 1.15
 protobuf = 3.25.5
 
 # test dependencies
-randomizedrunner = 2.8.0
+randomizedrunner = 2.8.2
 junit = 4.13.2
 junit5 = 5.7.1
 hamcrest = 2.1
diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/test/GradleTestPolicySetupPlugin.java b/build-tools/src/main/java/org/elasticsearch/gradle/test/GradleTestPolicySetupPlugin.java
index 2068ee4447971..2107156902487 100644
--- a/build-tools/src/main/java/org/elasticsearch/gradle/test/GradleTestPolicySetupPlugin.java
+++ b/build-tools/src/main/java/org/elasticsearch/gradle/test/GradleTestPolicySetupPlugin.java
@@ -9,11 +9,14 @@
 
 package org.elasticsearch.gradle.test;
 
+import org.gradle.api.JavaVersion;
 import org.gradle.api.Plugin;
 import org.gradle.api.Project;
 import org.gradle.api.invocation.Gradle;
 import org.gradle.api.tasks.testing.Test;
 
+import java.util.List;
+
 public class GradleTestPolicySetupPlugin implements Plugin<Project> {
 
     @Override
@@ -23,8 +26,13 @@ public void apply(Project project) {
             test.systemProperty("tests.gradle", true);
             test.systemProperty("tests.task", test.getPath());
 
-            // Flag is required for later Java versions since our tests use a custom security manager
-            test.jvmArgs("-Djava.security.manager=allow");
+            test.getJvmArgumentProviders().add(() -> {
+                if (test.getJavaVersion().compareTo(JavaVersion.VERSION_23) <= 0) {
+                    return List.of("-Djava.security.manager=allow");
+                } else {
+                    return List.of();
+                }
+            });
 
             SystemPropertyCommandLineArgumentProvider nonInputProperties = new SystemPropertyCommandLineArgumentProvider();
             // don't track these as inputs since they contain absolute paths and break cache relocatability
diff --git a/build-tools/src/testFixtures/groovy/org/elasticsearch/gradle/fixtures/AbstractGradleFuncTest.groovy b/build-tools/src/testFixtures/groovy/org/elasticsearch/gradle/fixtures/AbstractGradleFuncTest.groovy
index f3f8e4703eba2..07214b5fbf845 100644
--- a/build-tools/src/testFixtures/groovy/org/elasticsearch/gradle/fixtures/AbstractGradleFuncTest.groovy
+++ b/build-tools/src/testFixtures/groovy/org/elasticsearch/gradle/fixtures/AbstractGradleFuncTest.groovy
@@ -156,12 +156,12 @@ abstract class AbstractGradleFuncTest extends Specification {
 
     File internalBuild(
         List<String> extraPlugins = [],
-        String bugfix = "7.15.2",
-        String bugfixLucene = "8.9.0",
-        String staged = "7.16.0",
-        String stagedLucene = "8.10.0",
-        String minor = "8.0.0",
-        String minorLucene = "9.0.0"
+        String maintenance = "7.16.10",
+        String bugfix2 = "8.1.3",
+        String bugfix = "8.2.1",
+        String staged = "8.3.0",
+        String minor = "8.4.0",
+        String current = "9.0.0"
     ) {
         buildFile << """plugins {
           id 'elasticsearch.global-build-info'
@@ -172,15 +172,17 @@ abstract class AbstractGradleFuncTest extends Specification {
         import org.elasticsearch.gradle.internal.BwcVersions
         import org.elasticsearch.gradle.Version
 
-        Version currentVersion = Version.fromString("8.1.0")
+        Version currentVersion = Version.fromString("${current}")
         def versionList = [
+          Version.fromString("$maintenance"),
+          Version.fromString("$bugfix2"),
           Version.fromString("$bugfix"),
           Version.fromString("$staged"),
           Version.fromString("$minor"),
           currentVersion
         ]
 
-        BwcVersions versions = new BwcVersions(currentVersion, versionList)
+        BwcVersions versions = new BwcVersions(currentVersion, versionList, ['main', '8.x', '8.3', '8.2', '8.1', '7.16'])
         buildParams.getBwcVersionsProperty().set(versions)
         """
     }
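Both test plugins above now register the security-manager flag through a `CommandLineArgumentProvider` rather than an eager `jvmArgs` call, so the decision is deferred until the test task's Java toolchain is actually known. A minimal standalone plugin using the same pattern (the plugin class name here is illustrative, not from this PR):

```java
import org.gradle.api.JavaVersion;
import org.gradle.api.Plugin;
import org.gradle.api.Project;
import org.gradle.api.tasks.testing.Test;

import java.util.List;

// Hedged sketch of the deferred-JVM-arg pattern used in this PR. The lambda
// satisfies CommandLineArgumentProvider and is evaluated at execution time,
// after the Test task's Java toolchain has been resolved.
public class ConditionalSecurityManagerFlagPlugin implements Plugin<Project> {
    @Override
    public void apply(Project project) {
        project.getTasks().withType(Test.class).configureEach(test ->
            test.getJvmArgumentProviders().add(() -> {
                // JDK 24 drops the Security Manager (JEP 486), so the "allow"
                // value would fail the JVM on newer runtimes.
                if (test.getJavaVersion().compareTo(JavaVersion.VERSION_23) <= 0) {
                    return List.of("-Djava.security.manager=allow");
                }
                return List.of();
            })
        );
    }
}
```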
diff --git a/dev-tools/publish_zstd_binaries.sh b/dev-tools/publish_zstd_binaries.sh
index d5be5c4aaec60..25d4aed6255be 100755
--- a/dev-tools/publish_zstd_binaries.sh
+++ b/dev-tools/publish_zstd_binaries.sh
@@ -79,8 +79,8 @@ build_linux_jar() {
 }
 
 echo 'Building Linux jars...'
-LINUX_ARM_JAR=$(build_linux_jar "linux/amd64" "x86-64")
-LINUX_X86_JAR=$(build_linux_jar "linux/arm64" "aarch64")
+LINUX_ARM_JAR=$(build_linux_jar "linux/arm64" "aarch64")
+LINUX_X86_JAR=$(build_linux_jar "linux/amd64" "x86-64")
 
 build_windows_jar() {
   ARTIFACT="$TEMP/zstd-$VERSION-windows-x86-64.jar"
diff --git a/distribution/build.gradle b/distribution/build.gradle
index bfbf10ac85e2f..e65d07dcfc2b4 100644
--- a/distribution/build.gradle
+++ b/distribution/build.gradle
@@ -70,10 +70,10 @@ tasks.register("generateDependenciesReport", ConcatFilesTask) {
   // Explicitly add the dependency on the RHEL UBI Docker base image
   String[] rhelUbiFields = [
     'Red Hat Universal Base Image minimal',
-    '8',
-    'https://catalog.redhat.com/software/containers/ubi8/ubi-minimal/5c359a62bed8bd75a2c3fba8',
+    '9',
+    'https://catalog.redhat.com/software/containers/ubi9-minimal/61832888c0d15aff4912fe0d',
     'Custom;https://www.redhat.com/licenses/EULA_Red_Hat_Universal_Base_Image_English_20190422.pdf',
-    'https://oss-dependencies.elastic.co/red-hat-universal-base-image-minimal/8/ubi-minimal-8-source.tar.gz'
+    'https://oss-dependencies.elastic.co/red-hat-universal-base-image-minimal/9/ubi-minimal-9-source.tar.gz'
   ]
   additionalLines << rhelUbiFields.join(',')
 }
diff --git a/distribution/bwc/bugfix2/build.gradle b/distribution/bwc/bugfix2/build.gradle
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/distribution/src/config/log4j2.properties b/distribution/src/config/log4j2.properties
index 36b5b03d9a110..bde4d9d17fc17 100644
--- a/distribution/src/config/log4j2.properties
+++ b/distribution/src/config/log4j2.properties
@@ -63,7 +63,7 @@ appender.deprecation_rolling.name = deprecation_rolling
 appender.deprecation_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation.json
 appender.deprecation_rolling.layout.type = ECSJsonLayout
 # Intentionally follows a different pattern to above
-appender.deprecation_rolling.layout.dataset = deprecation.elasticsearch
+appender.deprecation_rolling.layout.dataset = elasticsearch.deprecation
 appender.deprecation_rolling.filter.rate_limit.type = RateLimitingFilter
 appender.deprecation_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation-%i.json.gz
 
diff --git a/distribution/tools/plugin-cli/build.gradle b/distribution/tools/plugin-cli/build.gradle
index 57750f2162a71..dc2bcd96b8d9f 100644
--- a/distribution/tools/plugin-cli/build.gradle
+++ b/distribution/tools/plugin-cli/build.gradle
@@ -25,8 +25,8 @@ dependencies {
   implementation project(":libs:plugin-api")
   implementation project(":libs:plugin-scanner")
   // TODO: asm is picked up from the plugin scanner, we should consolidate so it is not defined twice
-  implementation 'org.ow2.asm:asm:9.7'
-  implementation 'org.ow2.asm:asm-tree:9.7'
+  implementation 'org.ow2.asm:asm:9.7.1'
+  implementation 'org.ow2.asm:asm-tree:9.7.1'
 
   api "org.bouncycastle:bcpg-fips:1.0.7.1"
   api "org.bouncycastle:bc-fips:1.0.2.5"
diff --git a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java
index b17ad7c87e3ff..fe0f82560894c 100644
--- a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java
+++ b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java
@@ -11,6 +11,8 @@
 
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.concurrent.EsExecutors;
+import org.elasticsearch.core.UpdateForV9;
+import org.elasticsearch.jdk.RuntimeVersionFeature;
 
 import java.io.IOException;
 import java.nio.file.Files;
@@ -137,9 +139,13 @@ private static Stream<String> maybeWorkaroundG1Bug() {
         return Stream.of();
     }
 
+    @UpdateForV9(owner = UpdateForV9.Owner.CORE_INFRA)
     private static Stream<String> maybeAllowSecurityManager() {
-        // Will become conditional on useEntitlements once entitlements can run without SM
-        return Stream.of("-Djava.security.manager=allow");
+        if (RuntimeVersionFeature.isSecurityManagerAvailable()) {
+            // Will become conditional on useEntitlements once entitlements can run without SM
+            return Stream.of("-Djava.security.manager=allow");
+        }
+        return Stream.of();
    }
 
     private static Stream<String> maybeAttachEntitlementAgent(boolean useEntitlements) {
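On the server side the same flag is now gated on the runtime itself rather than on Gradle metadata. The implementation of `RuntimeVersionFeature.isSecurityManagerAvailable()` is not shown in this diff; a standalone sketch of the shape such a check plausibly takes, assuming the Security Manager is unavailable from JDK 24 onward (JEP 486):

```java
import java.util.stream.Stream;

// Hedged sketch of a runtime-version gate for -Djava.security.manager=allow.
// The real check lives in org.elasticsearch.jdk.RuntimeVersionFeature.
final class SecurityManagerGate {
    static boolean isSecurityManagerAvailable() {
        return Runtime.version().feature() < 24;
    }

    static Stream<String> maybeAllowSecurityManager() {
        return isSecurityManagerAvailable()
            ? Stream.of("-Djava.security.manager=allow")  // keep legacy behavior on older JDKs
            : Stream.empty();                             // flag would abort startup on JDK 24+
    }

    public static void main(String[] args) {
        maybeAllowSecurityManager().forEach(System.out::println);
    }
}
```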
b/docs/changelog/117555.yaml new file mode 100644 index 0000000000000..7891ab6d93a64 --- /dev/null +++ b/docs/changelog/117555.yaml @@ -0,0 +1,5 @@ +pr: 117555 +summary: Expand type compatibility for match function and operator +area: ES|QL +type: feature +issues: [] diff --git a/docs/changelog/117575.yaml b/docs/changelog/117575.yaml new file mode 100644 index 0000000000000..781444ae97be5 --- /dev/null +++ b/docs/changelog/117575.yaml @@ -0,0 +1,5 @@ +pr: 117575 +summary: Fix enrich cache size setting name +area: Ingest Node +type: bug +issues: [] diff --git a/docs/changelog/117589.yaml b/docs/changelog/117589.yaml new file mode 100644 index 0000000000000..e6880fd9477b5 --- /dev/null +++ b/docs/changelog/117589.yaml @@ -0,0 +1,5 @@ +pr: 117589 +summary: "Add Inference Unified API for chat completions for OpenAI" +area: Machine Learning +type: enhancement +issues: [] diff --git a/docs/changelog/117657.yaml b/docs/changelog/117657.yaml new file mode 100644 index 0000000000000..0a72e9dabe9e8 --- /dev/null +++ b/docs/changelog/117657.yaml @@ -0,0 +1,5 @@ +pr: 117657 +summary: Ignore cancellation exceptions +area: ES|QL +type: bug +issues: [] diff --git a/docs/changelog/117701.yaml b/docs/changelog/117701.yaml new file mode 100644 index 0000000000000..5a72bdeb143e6 --- /dev/null +++ b/docs/changelog/117701.yaml @@ -0,0 +1,6 @@ +pr: 117701 +summary: Watcher history index has too many indexed fields - +area: Watcher +type: bug +issues: + - 71479 diff --git a/docs/changelog/117792.yaml b/docs/changelog/117792.yaml new file mode 100644 index 0000000000000..2d7ddda1ace40 --- /dev/null +++ b/docs/changelog/117792.yaml @@ -0,0 +1,6 @@ +pr: 117792 +summary: Address mapping and compute engine runtime field issues +area: Mapping +type: bug +issues: + - 117644 diff --git a/docs/changelog/117898.yaml b/docs/changelog/117898.yaml new file mode 100644 index 0000000000000..c60061abc49ff --- /dev/null +++ b/docs/changelog/117898.yaml @@ -0,0 +1,5 @@ +pr: 117898 +summary: Limit size of query +area: ES|QL +type: bug +issues: [] diff --git a/docs/changelog/117914.yaml b/docs/changelog/117914.yaml new file mode 100644 index 0000000000000..da58ed7bb04b7 --- /dev/null +++ b/docs/changelog/117914.yaml @@ -0,0 +1,5 @@ +pr: 117914 +summary: Fix for propagating filters from compound to inner retrievers +area: Ranking +type: bug +issues: [] diff --git a/docs/changelog/117917.yaml b/docs/changelog/117917.yaml new file mode 100644 index 0000000000000..b6dc90f6b903d --- /dev/null +++ b/docs/changelog/117917.yaml @@ -0,0 +1,5 @@ +pr: 117917 +summary: Add option to store `sparse_vector` outside `_source` +area: Mapping +type: feature +issues: [] diff --git a/docs/changelog/117920.yaml b/docs/changelog/117920.yaml new file mode 100644 index 0000000000000..1bfddabd4462d --- /dev/null +++ b/docs/changelog/117920.yaml @@ -0,0 +1,6 @@ +pr: 117920 +summary: Wait for the worker service to shutdown before closing task processor +area: Machine Learning +type: bug +issues: + - 117563 diff --git a/docs/changelog/117933.yaml b/docs/changelog/117933.yaml new file mode 100644 index 0000000000000..92ae31afa30dd --- /dev/null +++ b/docs/changelog/117933.yaml @@ -0,0 +1,18 @@ +pr: 117933 +summary: Change `deprecation.elasticsearch` keyword to `elasticsearch.deprecation` +area: Infra/Logging +type: bug +issues: + - 83251 +breaking: + title: Deprecation logging value change for "data_stream.dataset" and "event.dataset" + area: Logging + details: |- + This change modifies the "data_stream.dataset" and "event.dataset" value for deprecation 
logging + to use the value `elasticsearch.deprecation` instead of `deprecation.elasticsearch`. This is now + consistent with other values where the name of the service is the first part of the key. + impact: |- + If you are directly consuming deprecation logs for "data_stream.dataset" and "event.dataset" and filtering on + this value, you will need to update your filters to use `elasticsearch.deprecation` instead of + `deprecation.elasticsearch`. + notable: false diff --git a/docs/changelog/117953.yaml b/docs/changelog/117953.yaml new file mode 100644 index 0000000000000..62f0218b1cdc7 --- /dev/null +++ b/docs/changelog/117953.yaml @@ -0,0 +1,5 @@ +pr: 117953 +summary: Acquire stats searcher for data stream stats +area: Data streams +type: bug +issues: [] diff --git a/docs/changelog/117963.yaml b/docs/changelog/117963.yaml new file mode 100644 index 0000000000000..4a50dc175786b --- /dev/null +++ b/docs/changelog/117963.yaml @@ -0,0 +1,5 @@ +pr: 117963 +summary: '`SearchServiceTests.testParseSourceValidation` failure' +area: Search +type: bug +issues: [] diff --git a/docs/changelog/117994.yaml b/docs/changelog/117994.yaml new file mode 100644 index 0000000000000..603f2d855a11a --- /dev/null +++ b/docs/changelog/117994.yaml @@ -0,0 +1,5 @@ +pr: 117994 +summary: Even better(er) binary quantization +area: Vector Search +type: enhancement +issues: [] diff --git a/docs/changelog/118025.yaml b/docs/changelog/118025.yaml new file mode 100644 index 0000000000000..9b615f4d5e621 --- /dev/null +++ b/docs/changelog/118025.yaml @@ -0,0 +1,5 @@ +pr: 118025 +summary: Update sparse text embeddings API route for Inference Service +area: Inference +type: enhancement +issues: [] diff --git a/docs/changelog/118027.yaml b/docs/changelog/118027.yaml new file mode 100644 index 0000000000000..161c156b56a65 --- /dev/null +++ b/docs/changelog/118027.yaml @@ -0,0 +1,6 @@ +pr: 118027 +summary: Esql compare nanos and millis +area: ES|QL +type: enhancement +issues: + - 116281 diff --git a/docs/changelog/118064.yaml b/docs/changelog/118064.yaml new file mode 100644 index 0000000000000..7d12f365bf142 --- /dev/null +++ b/docs/changelog/118064.yaml @@ -0,0 +1,5 @@ +pr: 118064 +summary: Add Highlighter for Semantic Text Fields +area: Highlighting +type: feature +issues: [] diff --git a/docs/changelog/118094.yaml b/docs/changelog/118094.yaml new file mode 100644 index 0000000000000..a8866543fa7d2 --- /dev/null +++ b/docs/changelog/118094.yaml @@ -0,0 +1,5 @@ +pr: 118094 +summary: Update ASM 9.7 -> 9.7.1 to support JDK 24 +area: Infra/Core +type: upgrade +issues: [] diff --git a/docs/changelog/118104.yaml b/docs/changelog/118104.yaml new file mode 100644 index 0000000000000..eb8ac661e9f93 --- /dev/null +++ b/docs/changelog/118104.yaml @@ -0,0 +1,12 @@ +pr: 118104 +summary: Remove old `_knn_search` tech preview API in v9 +area: Vector Search +type: breaking +issues: [] +breaking: + title: Remove old `_knn_search` tech preview API in v9 + area: REST API + details: The original, tech-preview api for vector search, `_knn_search`, has been removed in v9. For all vector search + operations, you should utilize the `_search` endpoint. + impact: The `_knn_search` API is now inaccessible without providing a compatible-with flag for v8. 
+ notable: false diff --git a/docs/changelog/118114.yaml b/docs/changelog/118114.yaml new file mode 100644 index 0000000000000..1b7532d5df981 --- /dev/null +++ b/docs/changelog/118114.yaml @@ -0,0 +1,5 @@ +pr: 118114 +summary: Enable physical plan verification +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/118177.yaml b/docs/changelog/118177.yaml new file mode 100644 index 0000000000000..5201fec3db306 --- /dev/null +++ b/docs/changelog/118177.yaml @@ -0,0 +1,6 @@ +pr: 118177 +summary: Fixing bedrock event executor terminated cache issue +area: Machine Learning +type: bug +issues: + - 117916 diff --git a/docs/changelog/118192.yaml b/docs/changelog/118192.yaml new file mode 100644 index 0000000000000..03542048761d3 --- /dev/null +++ b/docs/changelog/118192.yaml @@ -0,0 +1,11 @@ +pr: 118192 +summary: Remove `client.type` setting +area: Infra/Core +type: breaking +issues: [104574] +breaking: + title: Remove `client.type` setting + area: Cluster and node setting + details: The node setting `client.type` has been ignored since the node client was removed in 8.0. The setting is now removed. + impact: Remove the `client.type` setting from `elasticsearch.yml` + notable: false diff --git a/docs/changelog/118267.yaml b/docs/changelog/118267.yaml new file mode 100644 index 0000000000000..3e3920caeb0f9 --- /dev/null +++ b/docs/changelog/118267.yaml @@ -0,0 +1,5 @@ +pr: 118267 +summary: Adding get migration reindex status +area: Data streams +type: enhancement +issues: [] diff --git a/docs/changelog/118354.yaml b/docs/changelog/118354.yaml new file mode 100644 index 0000000000000..e2d72db121276 --- /dev/null +++ b/docs/changelog/118354.yaml @@ -0,0 +1,5 @@ +pr: 118354 +summary: Fix log message format bugs +area: Ingest Node +type: bug +issues: [] diff --git a/docs/changelog/118370.yaml b/docs/changelog/118370.yaml new file mode 100644 index 0000000000000..e6a429448e493 --- /dev/null +++ b/docs/changelog/118370.yaml @@ -0,0 +1,6 @@ +pr: 118370 +summary: Fix concurrency issue with `ReinitializingSourceProvider` +area: Mapping +type: bug +issues: + - 118238 diff --git a/docs/changelog/118378.yaml b/docs/changelog/118378.yaml new file mode 100644 index 0000000000000..d6c388b671968 --- /dev/null +++ b/docs/changelog/118378.yaml @@ -0,0 +1,5 @@ +pr: 118378 +summary: Opt into extra data stream resolution +area: ES|QL +type: bug +issues: [] diff --git a/docs/internal/DistributedArchitectureGuide.md b/docs/internal/DistributedArchitectureGuide.md index 793d38e3d73b3..11a2c860eb326 100644 --- a/docs/internal/DistributedArchitectureGuide.md +++ b/docs/internal/DistributedArchitectureGuide.md @@ -386,6 +386,9 @@ The tasks infrastructure is used to track currently executing operations in the Each individual task is local to a node, but can be related to other tasks, on the same node or other nodes, via a parent-child relationship. +> [!NOTE] +> The Task management API is experimental/beta, its status and outstanding issues can be tracked [here](https://github.com/elastic/elasticsearch/issues/51628). + ### Task tracking and registration Tasks are tracked in-memory on each node in the node's [TaskManager], new tasks are registered via one of the [TaskManager#register] methods. 
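
To make the registration lifecycle concrete, here is a minimal sketch, assuming a `TaskAwareRequest` implementation is already at hand. The `"transport"` type string, the action name, and the wrapper class are illustrative assumptions for this sketch, not code from this change; in practice `TransportAction` performs this registration automatically.

```java
import org.elasticsearch.tasks.Task;
import org.elasticsearch.tasks.TaskAwareRequest;
import org.elasticsearch.tasks.TaskManager;

final class TaskRegistrationSketch {
    static void runTracked(TaskManager taskManager, TaskAwareRequest request) {
        // Register the operation so it becomes visible to the task management API.
        Task task = taskManager.register("transport", "cluster:example/sketch", request);
        try {
            // ... do the work the task represents ...
        } finally {
            // Tasks are tracked in-memory, so every register needs a matching unregister.
            taskManager.unregister(task);
        }
    }
}
```

Parent-child relationships come from the request itself: a `TaskAwareRequest` carries the parent `TaskId`, which the manager reads at registration time.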
diff --git a/docs/plugins/analysis-kuromoji.asciidoc b/docs/plugins/analysis-kuromoji.asciidoc index 0a167bf3f0240..217d88f361223 100644 --- a/docs/plugins/analysis-kuromoji.asciidoc +++ b/docs/plugins/analysis-kuromoji.asciidoc @@ -750,3 +750,39 @@ Which results in: ] } -------------------------------------------------- + +[[analysis-kuromoji-completion]] +==== `kuromoji_completion` token filter + +The `kuromoji_completion` token filter adds Japanese romanized tokens to the term attributes along with the original tokens (surface forms). + +[source,console] +-------------------------------------------------- +GET _analyze +{ + "analyzer": "kuromoji_completion", + "text": "寿司" <1> +} +-------------------------------------------------- + +<1> Returns `寿司`, `susi` (Kunrei-shiki) and `sushi` (Hepburn-shiki). + +The `kuromoji_completion` token filter accepts the following settings: + +`mode`:: ++ +-- + +The tokenization mode determines how the tokenizer handles compound and +unknown words. It can be set to: + +`index`:: + + Simple romanization. Expected to be used when indexing. + +`query`:: + + Input Method aware romanization. Expected to be used when querying. + +Defaults to `index`. +-- diff --git a/docs/plugins/analysis-nori.asciidoc b/docs/plugins/analysis-nori.asciidoc index 0d3e76f71d238..9eb3bf07fbd30 100644 --- a/docs/plugins/analysis-nori.asciidoc +++ b/docs/plugins/analysis-nori.asciidoc @@ -475,7 +475,7 @@ The input is untokenized text and the result is the single term attribute emitte - 영영칠 -> 7 - 일영영영 -> 1000 - 삼천2백2십삼 -> 3223 -- 조육백만오천일 -> 1000006005001 +- 일조육백만오천일 -> 1000006005001 - 3.2천 -> 3200 - 1.2만345.67 -> 12345.67 - 4,647.100 -> 4647.1 diff --git a/docs/reference/cat/alias.asciidoc b/docs/reference/cat/alias.asciidoc index 41ac279d3b2f5..aab0c9df25ed4 100644 --- a/docs/reference/cat/alias.asciidoc +++ b/docs/reference/cat/alias.asciidoc @@ -51,6 +51,8 @@ include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=cat-v] include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=expand-wildcards] +include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=master-timeout] + [[cat-alias-api-example]] ==== {api-examples-title} diff --git a/docs/reference/connector/docs/connectors-box.asciidoc b/docs/reference/connector/docs/connectors-box.asciidoc index 07e4308d67c20..3e95f15d16ccd 100644 --- a/docs/reference/connector/docs/connectors-box.asciidoc +++ b/docs/reference/connector/docs/connectors-box.asciidoc @@ -54,7 +54,7 @@ For additional operations, see <>. ====== Box Free Account [discrete#es-connectors-box-create-oauth-custom-app] -======= Create Box User Authentication (OAuth 2.0) Custom App +*Create Box User Authentication (OAuth 2.0) Custom App* You'll need to create an OAuth app in the Box developer console by following these steps: @@ -64,7 +64,7 @@ You'll need to create an OAuth app in the Box developer console by following the 4. Once the app is created, *Client ID* and *Client secret* values are available in the configuration tab. Keep these handy. [discrete#es-connectors-box-connector-generate-a-refresh-token] -======= Generate a refresh Token +*Generate a refresh Token* To generate a refresh token, follow these steps: @@ -97,7 +97,7 @@ Save the refresh token from the response. 
You'll need this for the connector con ====== Box Enterprise Account [discrete#es-connectors-box-connector-create-box-server-authentication-client-credentials-grant-custom-app] -======= Create Box Server Authentication (Client Credentials Grant) Custom App +*Create Box Server Authentication (Client Credentials Grant) Custom App* 1. Register a new app in the https://app.box.com/developers/console[Box dev console] with custom App and select Server Authentication (Client Credentials Grant). 2. Check following permissions: @@ -224,7 +224,7 @@ For additional operations, see <>. ====== Box Free Account [discrete#es-connectors-box-client-create-oauth-custom-app] -======= Create Box User Authentication (OAuth 2.0) Custom App +*Create Box User Authentication (OAuth 2.0) Custom App* You'll need to create an OAuth app in the Box developer console by following these steps: @@ -234,7 +234,7 @@ You'll need to create an OAuth app in the Box developer console by following the 4. Once the app is created, *Client ID* and *Client secret* values are available in the configuration tab. Keep these handy. [discrete#es-connectors-box-client-connector-generate-a-refresh-token] -======= Generate a refresh Token +*Generate a refresh Token* To generate a refresh token, follow these steps: @@ -267,7 +267,7 @@ Save the refresh token from the response. You'll need this for the connector con ====== Box Enterprise Account [discrete#es-connectors-box-client-connector-create-box-server-authentication-client-credentials-grant-custom-app] -======= Create Box Server Authentication (Client Credentials Grant) Custom App +*Create Box Server Authentication (Client Credentials Grant) Custom App* 1. Register a new app in the https://app.box.com/developers/console[Box dev console] with custom App and select Server Authentication (Client Credentials Grant). 2. Check following permissions: diff --git a/docs/reference/connector/docs/connectors-content-extraction.asciidoc b/docs/reference/connector/docs/connectors-content-extraction.asciidoc index 5d2a9550a7c3c..a87d38c9bf531 100644 --- a/docs/reference/connector/docs/connectors-content-extraction.asciidoc +++ b/docs/reference/connector/docs/connectors-content-extraction.asciidoc @@ -183,7 +183,7 @@ Be aware that the self-managed connector will download files with randomized fil For that reason, we recommend using a dedicated directory for self-hosted extraction. [discrete#es-connectors-content-extraction-data-extraction-service-file-pointers-configuration-example] -======= Example +*Example* 1. For this example, we will be using `/app/files` as both our local directory and our container directory. When you run the extraction service docker container, you can mount the directory as a volume using the command-line option `-v /app/files:/app/files`. @@ -228,7 +228,7 @@ When using self-hosted extraction from a dockerized self-managed connector, ther * The self-managed connector and the extraction service will also need to share a volume. You can decide what directory inside these docker containers the volume will be mounted onto, but the directory must be the same for both docker containers. [discrete#es-connectors-content-extraction-data-extraction-service-file-pointers-configuration-dockerized-example] -======= Example +*Example* 1. First, set up a volume for the two docker containers to share. This will be where files are downloaded into and then extracted from. 
diff --git a/docs/reference/connector/docs/connectors-dropbox.asciidoc b/docs/reference/connector/docs/connectors-dropbox.asciidoc index 1f80a0ab4e952..295b7e2936625 100644 --- a/docs/reference/connector/docs/connectors-dropbox.asciidoc +++ b/docs/reference/connector/docs/connectors-dropbox.asciidoc @@ -190,7 +190,7 @@ When both are provided, priority is given to `file_categories`. We have some examples below for illustration. [discrete#es-connectors-dropbox-sync-rules-advanced-example-1] -======= Example: Query only +*Example: Query only* [source,js] ---- @@ -206,7 +206,7 @@ We have some examples below for illustration. // NOTCONSOLE [discrete#es-connectors-dropbox-sync-rules-advanced-example-2] -======= Example: Query with file extension filter +*Example: Query with file extension filter* [source,js] ---- @@ -225,7 +225,7 @@ We have some examples below for illustration. // NOTCONSOLE [discrete#es-connectors-dropbox-sync-rules-advanced-example-3] -======= Example: Query with file category filter +*Example: Query with file category filter* [source,js] ---- @@ -248,7 +248,7 @@ We have some examples below for illustration. // NOTCONSOLE [discrete#es-connectors-dropbox-sync-rules-advanced-limitations] -======= Limitations +*Limitations* * Content extraction is not supported for Dropbox *Paper* files when advanced sync rules are enabled. @@ -474,7 +474,7 @@ When both are provided, priority is given to `file_categories`. We have some examples below for illustration. [discrete#es-connectors-dropbox-client-sync-rules-advanced-example-1] -======= Example: Query only +*Example: Query only* [source,js] ---- @@ -490,7 +490,7 @@ We have some examples below for illustration. // NOTCONSOLE [discrete#es-connectors-dropbox-client-sync-rules-advanced-example-2] -======= Example: Query with file extension filter +*Example: Query with file extension filter* [source,js] ---- @@ -509,7 +509,7 @@ We have some examples below for illustration. // NOTCONSOLE [discrete#es-connectors-dropbox-client-sync-rules-advanced-example-3] -======= Example: Query with file category filter +*Example: Query with file category filter* [source,js] ---- @@ -532,7 +532,7 @@ We have some examples below for illustration. // NOTCONSOLE [discrete#es-connectors-dropbox-client-sync-rules-advanced-limitations] -======= Limitations +*Limitations* * Content extraction is not supported for Dropbox *Paper* files when advanced sync rules are enabled. diff --git a/docs/reference/connector/docs/connectors-github.asciidoc b/docs/reference/connector/docs/connectors-github.asciidoc index aa683e4bb0829..df577d83e8121 100644 --- a/docs/reference/connector/docs/connectors-github.asciidoc +++ b/docs/reference/connector/docs/connectors-github.asciidoc @@ -210,7 +210,7 @@ Advanced sync rules are defined through a source-specific DSL JSON snippet. The following sections provide examples of advanced sync rules for this connector. 
[discrete#es-connectors-github-sync-rules-advanced-branch] -======= Indexing document and files based on branch name configured via branch key +*Indexing document and files based on branch name configured via branch key* [source,js] ---- @@ -226,7 +226,7 @@ The following sections provide examples of advanced sync rules for this connecto // NOTCONSOLE [discrete#es-connectors-github-sync-rules-advanced-issue-key] -======= Indexing document based on issue query related to bugs via issue key +*Indexing document based on issue query related to bugs via issue key* [source,js] ---- @@ -242,7 +242,7 @@ The following sections provide examples of advanced sync rules for this connecto // NOTCONSOLE [discrete#es-connectors-github-sync-rules-advanced-pr-key] -======= Indexing document based on PR query related to open PR's via PR key +*Indexing document based on PR query related to open PR's via PR key* [source,js] ---- @@ -258,7 +258,7 @@ The following sections provide examples of advanced sync rules for this connecto // NOTCONSOLE [discrete#es-connectors-github-sync-rules-advanced-issue-query-branch-name] -======= Indexing document and files based on queries and branch name +*Indexing document and files based on queries and branch name* [source,js] ---- @@ -283,7 +283,7 @@ Check the Elasticsearch index for the actual document count. ==== [discrete#es-connectors-github-sync-rules-advanced-overlapping] -======= Advanced rules for overlapping +*Advanced rules for overlapping* [source,js] ---- @@ -550,7 +550,7 @@ Advanced sync rules are defined through a source-specific DSL JSON snippet. The following sections provide examples of advanced sync rules for this connector. [discrete#es-connectors-github-client-sync-rules-advanced-branch] -======= Indexing document and files based on branch name configured via branch key +*Indexing document and files based on branch name configured via branch key* [source,js] ---- @@ -566,7 +566,7 @@ The following sections provide examples of advanced sync rules for this connecto // NOTCONSOLE [discrete#es-connectors-github-client-sync-rules-advanced-issue-key] -======= Indexing document based on issue query related to bugs via issue key +*Indexing document based on issue query related to bugs via issue key* [source,js] ---- @@ -582,7 +582,7 @@ The following sections provide examples of advanced sync rules for this connecto // NOTCONSOLE [discrete#es-connectors-github-client-sync-rules-advanced-pr-key] -======= Indexing document based on PR query related to open PR's via PR key +*Indexing document based on PR query related to open PR's via PR key* [source,js] ---- @@ -598,7 +598,7 @@ The following sections provide examples of advanced sync rules for this connecto // NOTCONSOLE [discrete#es-connectors-github-client-sync-rules-advanced-issue-query-branch-name] -======= Indexing document and files based on queries and branch name +*Indexing document and files based on queries and branch name* [source,js] ---- @@ -623,7 +623,7 @@ Check the Elasticsearch index for the actual document count. 
==== [discrete#es-connectors-github-client-sync-rules-advanced-overlapping] -======= Advanced rules for overlapping +*Advanced rules for overlapping* [source,js] ---- diff --git a/docs/reference/connector/docs/connectors-ms-sql.asciidoc b/docs/reference/connector/docs/connectors-ms-sql.asciidoc index 47fb282b16877..d706af8ca8043 100644 --- a/docs/reference/connector/docs/connectors-ms-sql.asciidoc +++ b/docs/reference/connector/docs/connectors-ms-sql.asciidoc @@ -196,7 +196,7 @@ Here are a few examples of advanced sync rules for this connector. ==== [discrete#es-connectors-ms-sql-sync-rules-advanced-queries] -======= Example: Two queries +*Example: Two queries* These rules fetch all records from both the `employee` and `customer` tables. The data from these tables will be synced separately to Elasticsearch. @@ -220,7 +220,7 @@ These rules fetch all records from both the `employee` and `customer` tables. Th // NOTCONSOLE [discrete#es-connectors-ms-sql-sync-rules-example-one-where] -======= Example: One WHERE query +*Example: One WHERE query* This rule fetches only the records from the `employee` table where the `emp_id` is greater than 5. Only these filtered records will be synced to Elasticsearch. @@ -236,7 +236,7 @@ This rule fetches only the records from the `employee` table where the `emp_id` // NOTCONSOLE [discrete#es-connectors-ms-sql-sync-rules-example-one-join] -======= Example: One JOIN query +*Example: One JOIN query* This rule fetches records by performing an INNER JOIN between the `employee` and `customer` tables on the condition that the `emp_id` in `employee` matches the `c_id` in `customer`. The result of this combined data will be synced to Elasticsearch. @@ -484,7 +484,7 @@ Here are a few examples of advanced sync rules for this connector. ==== [discrete#es-connectors-ms-sql-client-sync-rules-advanced-queries] -======= Example: Two queries +*Example: Two queries* These rules fetch all records from both the `employee` and `customer` tables. The data from these tables will be synced separately to Elasticsearch. @@ -508,7 +508,7 @@ These rules fetch all records from both the `employee` and `customer` tables. Th // NOTCONSOLE [discrete#es-connectors-ms-sql-client-sync-rules-example-one-where] -======= Example: One WHERE query +*Example: One WHERE query* This rule fetches only the records from the `employee` table where the `emp_id` is greater than 5. Only these filtered records will be synced to Elasticsearch. @@ -524,7 +524,7 @@ This rule fetches only the records from the `employee` table where the `emp_id` // NOTCONSOLE [discrete#es-connectors-ms-sql-client-sync-rules-example-one-join] -======= Example: One JOIN query +*Example: One JOIN query* This rule fetches records by performing an INNER JOIN between the `employee` and `customer` tables on the condition that the `emp_id` in `employee` matches the `c_id` in `customer`. The result of this combined data will be synced to Elasticsearch. diff --git a/docs/reference/connector/docs/connectors-network-drive.asciidoc b/docs/reference/connector/docs/connectors-network-drive.asciidoc index 91c9d3b28c385..909e3440c9f02 100644 --- a/docs/reference/connector/docs/connectors-network-drive.asciidoc +++ b/docs/reference/connector/docs/connectors-network-drive.asciidoc @@ -174,7 +174,7 @@ Advanced sync rules for this connector use *glob patterns*. The following sections provide examples of advanced sync rules for this connector. 
[discrete#es-connectors-network-drive-indexing-files-and-folders-recursively-within-folders] -======= Indexing files and folders recursively within folders +*Indexing files and folders recursively within folders* [source,js] ---- @@ -190,7 +190,7 @@ The following sections provide examples of advanced sync rules for this connecto // NOTCONSOLE [discrete#es-connectors-network-drive-indexing-files-and-folders-directly-inside-folder] -======= Indexing files and folders directly inside folder +*Indexing files and folders directly inside folder* [source,js] ---- @@ -203,7 +203,7 @@ The following sections provide examples of advanced sync rules for this connecto // NOTCONSOLE [discrete#es-connectors-network-drive-indexing-files-and-folders-directly-inside-a-set-of-folders] -======= Indexing files and folders directly inside a set of folders +*Indexing files and folders directly inside a set of folders* [source,js] ---- @@ -216,7 +216,7 @@ The following sections provide examples of advanced sync rules for this connecto // NOTCONSOLE [discrete#es-connectors-network-drive-excluding-files-and-folders-that-match-a-pattern] -======= Excluding files and folders that match a pattern +*Excluding files and folders that match a pattern* [source,js] ---- @@ -432,7 +432,7 @@ Advanced sync rules for this connector use *glob patterns*. The following sections provide examples of advanced sync rules for this connector. [discrete#es-connectors-network-drive-client-indexing-files-and-folders-recursively-within-folders] -======= Indexing files and folders recursively within folders +*Indexing files and folders recursively within folders* [source,js] ---- @@ -448,7 +448,7 @@ The following sections provide examples of advanced sync rules for this connecto // NOTCONSOLE [discrete#es-connectors-network-drive-client-indexing-files-and-folders-directly-inside-folder] -======= Indexing files and folders directly inside folder +*Indexing files and folders directly inside folder* [source,js] ---- @@ -461,7 +461,7 @@ The following sections provide examples of advanced sync rules for this connecto // NOTCONSOLE [discrete#es-connectors-network-drive-client-indexing-files-and-folders-directly-inside-a-set-of-folders] -======= Indexing files and folders directly inside a set of folders +*Indexing files and folders directly inside a set of folders* [source,js] ---- @@ -474,7 +474,7 @@ The following sections provide examples of advanced sync rules for this connecto // NOTCONSOLE [discrete#es-connectors-network-drive-client-excluding-files-and-folders-that-match-a-pattern] -======= Excluding files and folders that match a pattern +*Excluding files and folders that match a pattern* [source,js] ---- diff --git a/docs/reference/connector/docs/connectors-notion.asciidoc b/docs/reference/connector/docs/connectors-notion.asciidoc index 2d7a71bff20de..7c08c5d81e032 100644 --- a/docs/reference/connector/docs/connectors-notion.asciidoc +++ b/docs/reference/connector/docs/connectors-notion.asciidoc @@ -140,7 +140,7 @@ Advanced sync rules for Notion take the following parameters: ====== Examples [discrete] -======= Example 1 +*Example 1* Indexing every page where the title contains `Demo Page`: @@ -160,7 +160,7 @@ Indexing every page where the title contains `Demo Page`: // NOTCONSOLE [discrete] -======= Example 2 +*Example 2* Indexing every database where the title contains `Demo Database`: @@ -180,7 +180,7 @@ Indexing every database where the title contains `Demo Database`: // NOTCONSOLE [discrete] -======= Example 3 +*Example 3* Indexing 
every database where the title contains `Demo Database` and every page where the title contains `Demo Page`: @@ -206,7 +206,7 @@ Indexing every database where the title contains `Demo Database` and every page // NOTCONSOLE [discrete] -======= Example 4 +*Example 4* Indexing all pages in the workspace: @@ -226,7 +226,7 @@ Indexing all pages in the workspace: // NOTCONSOLE [discrete] -======= Example 5 +*Example 5* Indexing all the pages and databases connected to the workspace: @@ -243,7 +243,7 @@ Indexing all the pages and databases connected to the workspace: // NOTCONSOLE [discrete] -======= Example 6 +*Example 6* Indexing all the rows of a database where the record is `true` for the column `Task completed` and its property(datatype) is a checkbox: @@ -266,7 +266,7 @@ Indexing all the rows of a database where the record is `true` for the column `T // NOTCONSOLE [discrete] -======= Example 7 +*Example 7* Indexing all rows of a specific database: @@ -283,7 +283,7 @@ Indexing all rows of a specific database: // NOTCONSOLE [discrete] -======= Example 8 +*Example 8* Indexing all blocks defined in `searches` and `database_query_filters`: @@ -498,7 +498,7 @@ Advanced sync rules for Notion take the following parameters: ====== Examples [discrete] -======= Example 1 +*Example 1* Indexing every page where the title contains `Demo Page`: @@ -518,7 +518,7 @@ Indexing every page where the title contains `Demo Page`: // NOTCONSOLE [discrete] -======= Example 2 +*Example 2* Indexing every database where the title contains `Demo Database`: @@ -538,7 +538,7 @@ Indexing every database where the title contains `Demo Database`: // NOTCONSOLE [discrete] -======= Example 3 +*Example 3* Indexing every database where the title contains `Demo Database` and every page where the title contains `Demo Page`: @@ -564,7 +564,7 @@ Indexing every database where the title contains `Demo Database` and every page // NOTCONSOLE [discrete] -======= Example 4 +*Example 4* Indexing all pages in the workspace: @@ -584,7 +584,7 @@ Indexing all pages in the workspace: // NOTCONSOLE [discrete] -======= Example 5 +*Example 5* Indexing all the pages and databases connected to the workspace: @@ -601,7 +601,7 @@ Indexing all the pages and databases connected to the workspace: // NOTCONSOLE [discrete] -======= Example 6 +*Example 6* Indexing all the rows of a database where the record is `true` for the column `Task completed` and its property(datatype) is a checkbox: @@ -624,7 +624,7 @@ Indexing all the rows of a database where the record is `true` for the column `T // NOTCONSOLE [discrete] -======= Example 7 +*Example 7* Indexing all rows of a specific database: @@ -641,7 +641,7 @@ Indexing all rows of a specific database: // NOTCONSOLE [discrete] -======= Example 8 +*Example 8* Indexing all blocks defined in `searches` and `database_query_filters`: diff --git a/docs/reference/connector/docs/connectors-onedrive.asciidoc b/docs/reference/connector/docs/connectors-onedrive.asciidoc index 7d1a21aeb78db..44ac96e2ad99d 100644 --- a/docs/reference/connector/docs/connectors-onedrive.asciidoc +++ b/docs/reference/connector/docs/connectors-onedrive.asciidoc @@ -160,7 +160,7 @@ A <> is required for advanced sync rul Here are a few examples of advanced sync rules for this connector. [discrete#es-connectors-onedrive-sync-rules-advanced-examples-1] -======= Example 1 +*Example 1* This rule skips indexing for files with `.xlsx` and `.docx` extensions. All other files and folders will be indexed. 
@@ -176,7 +176,7 @@ All other files and folders will be indexed. // NOTCONSOLE [discrete#es-connectors-onedrive-sync-rules-advanced-examples-2] -======= Example 2 +*Example 2* This rule focuses on indexing files and folders owned by `user1-domain@onmicrosoft.com` and `user2-domain@onmicrosoft.com` but excludes files with `.py` extension. @@ -192,7 +192,7 @@ This rule focuses on indexing files and folders owned by `user1-domain@onmicroso // NOTCONSOLE [discrete#es-connectors-onedrive-sync-rules-advanced-examples-3] -======= Example 3 +*Example 3* This rule indexes only the files and folders directly inside the root folder, excluding any `.md` files. @@ -208,7 +208,7 @@ This rule indexes only the files and folders directly inside the root folder, ex // NOTCONSOLE [discrete#es-connectors-onedrive-sync-rules-advanced-examples-4] -======= Example 4 +*Example 4* This rule indexes files and folders owned by `user1-domain@onmicrosoft.com` and `user3-domain@onmicrosoft.com` that are directly inside the `abc` folder, which is a subfolder of any folder under the `hello` directory in the root. Files with extensions `.pdf` and `.py` are excluded. @@ -225,7 +225,7 @@ This rule indexes files and folders owned by `user1-domain@onmicrosoft.com` and // NOTCONSOLE [discrete#es-connectors-onedrive-sync-rules-advanced-examples-5] -======= Example 5 +*Example 5* This example contains two rules. The first rule indexes all files and folders owned by `user1-domain@onmicrosoft.com` and `user2-domain@onmicrosoft.com`. @@ -245,7 +245,7 @@ The second rule indexes files for all other users, but skips files with a `.py` // NOTCONSOLE [discrete#es-connectors-onedrive-sync-rules-advanced-examples-6] -======= Example 6 +*Example 6* This example contains two rules. The first rule indexes all files owned by `user1-domain@onmicrosoft.com` and `user2-domain@onmicrosoft.com`, excluding `.md` files. @@ -449,7 +449,7 @@ A <> is required for advanced sync rul Here are a few examples of advanced sync rules for this connector. [discrete#es-connectors-onedrive-client-sync-rules-advanced-examples-1] -======= Example 1 +*Example 1* This rule skips indexing for files with `.xlsx` and `.docx` extensions. All other files and folders will be indexed. @@ -465,7 +465,7 @@ All other files and folders will be indexed. // NOTCONSOLE [discrete#es-connectors-onedrive-client-sync-rules-advanced-examples-2] -======= Example 2 +*Example 2* This rule focuses on indexing files and folders owned by `user1-domain@onmicrosoft.com` and `user2-domain@onmicrosoft.com` but excludes files with `.py` extension. @@ -481,7 +481,7 @@ This rule focuses on indexing files and folders owned by `user1-domain@onmicroso // NOTCONSOLE [discrete#es-connectors-onedrive-client-sync-rules-advanced-examples-3] -======= Example 3 +*Example 3* This rule indexes only the files and folders directly inside the root folder, excluding any `.md` files. @@ -497,7 +497,7 @@ This rule indexes only the files and folders directly inside the root folder, ex // NOTCONSOLE [discrete#es-connectors-onedrive-client-sync-rules-advanced-examples-4] -======= Example 4 +*Example 4* This rule indexes files and folders owned by `user1-domain@onmicrosoft.com` and `user3-domain@onmicrosoft.com` that are directly inside the `abc` folder, which is a subfolder of any folder under the `hello` directory in the root. Files with extensions `.pdf` and `.py` are excluded. 
@@ -514,7 +514,7 @@ This rule indexes files and folders owned by `user1-domain@onmicrosoft.com` and // NOTCONSOLE [discrete#es-connectors-onedrive-client-sync-rules-advanced-examples-5] -======= Example 5 +*Example 5* This example contains two rules. The first rule indexes all files and folders owned by `user1-domain@onmicrosoft.com` and `user2-domain@onmicrosoft.com`. @@ -534,7 +534,7 @@ The second rule indexes files for all other users, but skips files with a `.py` // NOTCONSOLE [discrete#es-connectors-onedrive-client-sync-rules-advanced-examples-6] -======= Example 6 +*Example 6* This example contains two rules. The first rule indexes all files owned by `user1-domain@onmicrosoft.com` and `user2-domain@onmicrosoft.com`, excluding `.md` files. diff --git a/docs/reference/connector/docs/connectors-postgresql.asciidoc b/docs/reference/connector/docs/connectors-postgresql.asciidoc index 1fe28f867337c..aa6cb7f29e633 100644 --- a/docs/reference/connector/docs/connectors-postgresql.asciidoc +++ b/docs/reference/connector/docs/connectors-postgresql.asciidoc @@ -188,7 +188,7 @@ Advanced sync rules are defined through a source-specific DSL JSON snippet. Here is some example data that will be used in the following examples. [discrete#connectors-postgresql-sync-rules-advanced-example-data-1] -======= `employee` table +*`employee` table* [cols="3*", options="header"] |=== @@ -199,7 +199,7 @@ Here is some example data that will be used in the following examples. |=== [discrete#connectors-postgresql-sync-rules-advanced-example-2] -======= `customer` table +*`customer` table* [cols="3*", options="header"] |=== @@ -213,7 +213,7 @@ Here is some example data that will be used in the following examples. ====== Advanced sync rules examples [discrete#connectors-postgresql-sync-rules-advanced-examples-1] -======= Multiple table queries +*Multiple table queries* [source,js] ---- @@ -235,7 +235,7 @@ Here is some example data that will be used in the following examples. // NOTCONSOLE [discrete#connectors-postgresql-sync-rules-advanced-examples-1-id-columns] -======= Multiple table queries with `id_columns` +*Multiple table queries with `id_columns`* In 8.15.0, we added a new optional `id_columns` field in our advanced sync rules for the PostgreSQL connector. Use the `id_columns` field to ingest tables which do not have a primary key. Include the names of unique fields so that the connector can use them to generate unique IDs for documents. @@ -264,7 +264,7 @@ Use the `id_columns` field to ingest tables which do not have a primary key. Inc This example uses the `id_columns` field to specify the unique fields `emp_id` and `c_id` for the `employee` and `customer` tables, respectively. [discrete#connectors-postgresql-sync-rules-advanced-examples-2] -======= Filtering data with `WHERE` clause +*Filtering data with `WHERE` clause* [source,js] ---- @@ -278,7 +278,7 @@ This example uses the `id_columns` field to specify the unique fields `emp_id` a // NOTCONSOLE [discrete#connectors-postgresql-sync-rules-advanced-examples-3] -======= `JOIN` operations +*`JOIN` operations* [source,js] ---- @@ -494,7 +494,7 @@ Advanced sync rules are defined through a source-specific DSL JSON snippet. Here is some example data that will be used in the following examples. [discrete#es-connectors-postgresql-client-sync-rules-advanced-example-data-1] -======= `employee` table +*`employee` table* [cols="3*", options="header"] |=== @@ -505,7 +505,7 @@ Here is some example data that will be used in the following examples. 
|=== [discrete#es-connectors-postgresql-client-sync-rules-advanced-example-2] -======= `customer` table +*`customer` table* [cols="3*", options="header"] |=== @@ -519,7 +519,7 @@ Here is some example data that will be used in the following examples. ====== Advanced sync rules examples [discrete#es-connectors-postgresql-client-sync-rules-advanced-examples-1] -======== Multiple table queries +*Multiple table queries* [source,js] ---- @@ -541,7 +541,7 @@ Here is some example data that will be used in the following examples. // NOTCONSOLE [discrete#es-connectors-postgresql-client-sync-rules-advanced-examples-1-id-columns] -======== Multiple table queries with `id_columns` +*Multiple table queries with `id_columns`* In 8.15.0, we added a new optional `id_columns` field in our advanced sync rules for the PostgreSQL connector. Use the `id_columns` field to ingest tables which do not have a primary key. Include the names of unique fields so that the connector can use them to generate unique IDs for documents. @@ -570,7 +570,7 @@ Use the `id_columns` field to ingest tables which do not have a primary key. Inc This example uses the `id_columns` field to specify the unique fields `emp_id` and `c_id` for the `employee` and `customer` tables, respectively. [discrete#es-connectors-postgresql-client-sync-rules-advanced-examples-2] -======== Filtering data with `WHERE` clause +*Filtering data with `WHERE` clause* [source,js] ---- @@ -584,7 +584,7 @@ This example uses the `id_columns` field to specify the unique fields `emp_id` a // NOTCONSOLE [discrete#es-connectors-postgresql-client-sync-rules-advanced-examples-3] -======== `JOIN` operations +*`JOIN` operations* [source,js] ---- diff --git a/docs/reference/connector/docs/connectors-s3.asciidoc b/docs/reference/connector/docs/connectors-s3.asciidoc index b4d08d3884631..90c070f7b8044 100644 --- a/docs/reference/connector/docs/connectors-s3.asciidoc +++ b/docs/reference/connector/docs/connectors-s3.asciidoc @@ -118,7 +118,7 @@ The connector will fetch file and folder data that matches the string. Defaults to `""` (syncs all bucket objects). [discrete#es-connectors-s3-sync-rules-advanced-examples] -======= Advanced sync rules examples +*Advanced sync rules examples* *Fetching files and folders recursively by prefix* @@ -336,7 +336,7 @@ The connector will fetch file and folder data that matches the string. Defaults to `""` (syncs all bucket objects). [discrete#es-connectors-s3-client-sync-rules-advanced-examples] -======= Advanced sync rules examples +*Advanced sync rules examples* *Fetching files and folders recursively by prefix* diff --git a/docs/reference/connector/docs/connectors-salesforce.asciidoc b/docs/reference/connector/docs/connectors-salesforce.asciidoc index 3676f7663089c..c640751de92c0 100644 --- a/docs/reference/connector/docs/connectors-salesforce.asciidoc +++ b/docs/reference/connector/docs/connectors-salesforce.asciidoc @@ -227,7 +227,7 @@ They take the following parameters: Allowed values are *SOQL* and *SOSL*. [discrete#es-connectors-salesforce-sync-rules-advanced-fetch-query-language] -======= Fetch documents based on the query and language specified +*Fetch documents based on the query and language specified* **Example**: Fetch documents using SOQL query @@ -256,7 +256,7 @@ Allowed values are *SOQL* and *SOSL*. 
// NOTCONSOLE [discrete#es-connectors-salesforce-sync-rules-advanced-fetch-objects] -======= Fetch standard and custom objects using SOQL and SOSL queries +*Fetch standard and custom objects using SOQL and SOSL queries* **Example**: Fetch documents for standard objects via SOQL and SOSL query. @@ -293,7 +293,7 @@ Allowed values are *SOQL* and *SOSL*. // NOTCONSOLE [discrete#es-connectors-salesforce-sync-rules-advanced-fetch-standard-custom-fields] -======= Fetch documents with standard and custom fields +*Fetch documents with standard and custom fields* **Example**: Fetch documents with all standard and custom fields for Account object. @@ -626,7 +626,7 @@ They take the following parameters: Allowed values are *SOQL* and *SOSL*. [discrete#es-connectors-salesforce-client-sync-rules-advanced-fetch-query-language] -======= Fetch documents based on the query and language specified +*Fetch documents based on the query and language specified* **Example**: Fetch documents using SOQL query @@ -655,7 +655,7 @@ Allowed values are *SOQL* and *SOSL*. // NOTCONSOLE [discrete#es-connectors-salesforce-client-sync-rules-advanced-fetch-objects] -======= Fetch standard and custom objects using SOQL and SOSL queries +*Fetch standard and custom objects using SOQL and SOSL queries* **Example**: Fetch documents for standard objects via SOQL and SOSL query. @@ -692,7 +692,7 @@ Allowed values are *SOQL* and *SOSL*. // NOTCONSOLE [discrete#es-connectors-salesforce-client-sync-rules-advanced-fetch-standard-custom-fields] -======= Fetch documents with standard and custom fields +*Fetch documents with standard and custom fields* **Example**: Fetch documents with all standard and custom fields for Account object. diff --git a/docs/reference/connector/docs/connectors-servicenow.asciidoc b/docs/reference/connector/docs/connectors-servicenow.asciidoc index a02c418f11d74..3dc98ed9a44c9 100644 --- a/docs/reference/connector/docs/connectors-servicenow.asciidoc +++ b/docs/reference/connector/docs/connectors-servicenow.asciidoc @@ -167,7 +167,7 @@ Advanced sync rules are defined through a source-specific DSL JSON snippet. The following sections provide examples of advanced sync rules for this connector. [discrete#es-connectors-servicenow-sync-rules-number-incident-service] -======= Indexing document based on incident number for Incident service +*Indexing document based on incident number for Incident service* [source,js] ---- @@ -181,7 +181,7 @@ The following sections provide examples of advanced sync rules for this connecto // NOTCONSOLE [discrete#es-connectors-servicenow-sync-rules-active-false-user-service] -======= Indexing document based on user activity state for User service +*Indexing document based on user activity state for User service* [source,js] ---- @@ -195,7 +195,7 @@ The following sections provide examples of advanced sync rules for this connecto // NOTCONSOLE [discrete#es-connectors-servicenow-sync-rules-author-administrator-knowledge-service] -======= Indexing document based on author name for Knowledge service +*Indexing document based on author name for Knowledge service* [source,js] ---- @@ -407,7 +407,7 @@ Advanced sync rules are defined through a source-specific DSL JSON snippet. The following sections provide examples of advanced sync rules for this connector. 
[discrete#es-connectors-servicenow-client-sync-rules-number-incident-service] -======= Indexing document based on incident number for Incident service +*Indexing document based on incident number for Incident service* [source,js] ---- @@ -421,7 +421,7 @@ The following sections provide examples of advanced sync rules for this connecto // NOTCONSOLE [discrete#es-connectors-servicenow-client-sync-rules-active-false-user-service] -======= Indexing document based on user activity state for User service +*Indexing document based on user activity state for User service* [source,js] ---- @@ -435,7 +435,7 @@ The following sections provide examples of advanced sync rules for this connecto // NOTCONSOLE [discrete#es-connectors-servicenow-client-sync-rules-author-administrator-knowledge-service] -======= Indexing document based on author name for Knowledge service +*Indexing document based on author name for Knowledge service* [source,js] ---- diff --git a/docs/reference/connector/docs/connectors-sharepoint-online.asciidoc b/docs/reference/connector/docs/connectors-sharepoint-online.asciidoc index 21d0890e436c5..02f598c16f63c 100644 --- a/docs/reference/connector/docs/connectors-sharepoint-online.asciidoc +++ b/docs/reference/connector/docs/connectors-sharepoint-online.asciidoc @@ -277,7 +277,7 @@ Example: This rule will not extract content of any drive items (files in document libraries) that haven't been modified for 60 days or more. [discrete#es-connectors-sharepoint-online-sync-rules-limitations] -======= Limitations of sync rules with incremental syncs +*Limitations of sync rules with incremental syncs* Changing sync rules after Sharepoint Online content has already been indexed can bring unexpected results, when using <>. @@ -288,7 +288,7 @@ Incremental syncs ensure _updates_ from 3rd-party system, but do not modify exis Let's take a look at several examples where incremental syncs might lead to inconsistent data on your index. [discrete#es-connectors-sharepoint-online-sync-rules-limitations-restrictive-added] -======== Example: Restrictive basic sync rule added after a full sync +*Example: Restrictive basic sync rule added after a full sync* Imagine your Sharepoint Online drive contains the following drive items: @@ -322,7 +322,7 @@ If no files were changed, incremental sync will not receive information about ch After a *full sync*, the index will be updated and files that are excluded by sync rules will be removed. [discrete#es-connectors-sharepoint-online-sync-rules-limitations-restrictive-removed] -======== Example: Restrictive basic sync rules removed after a full sync +*Example: Restrictive basic sync rules removed after a full sync* Imagine that Sharepoint Online drive has the following drive items: @@ -354,7 +354,7 @@ Afterwards, we can remove the filtering rule and run an incremental sync. If no Only a *full sync* will include the items previously ignored by the sync rule. [discrete#es-connectors-sharepoint-online-sync-rules-limitations-restrictive-changed] -======== Example: Advanced sync rules edge case +*Example: Advanced sync rules edge case* Advanced sync rules can be applied to limit which documents will have content extracted. For example, it's possible to set a rule so that documents older than 180 days won't have content extracted. @@ -763,7 +763,7 @@ Example: This rule will not extract content of any drive items (files in document libraries) that haven't been modified for 60 days or more. 
[discrete#es-connectors-sharepoint-online-client-sync-rules-limitations] -======= Limitations of sync rules with incremental syncs +*Limitations of sync rules with incremental syncs* Changing sync rules after Sharepoint Online content has already been indexed can bring unexpected results, when using <>. @@ -774,7 +774,7 @@ Incremental syncs ensure _updates_ from 3rd-party system, but do not modify exis Let's take a look at several examples where incremental syncs might lead to inconsistent data on your index. [discrete#es-connectors-sharepoint-online-client-sync-rules-limitations-restrictive-added] -======== Example: Restrictive basic sync rule added after a full sync +*Example: Restrictive basic sync rule added after a full sync* Imagine your Sharepoint Online drive contains the following drive items: @@ -808,7 +808,7 @@ If no files were changed, incremental sync will not receive information about ch After a *full sync*, the index will be updated and files that are excluded by sync rules will be removed. [discrete#es-connectors-sharepoint-online-client-sync-rules-limitations-restrictive-removed] -======== Example: Restrictive basic sync rules removed after a full sync +*Example: Restrictive basic sync rules removed after a full sync* Imagine that Sharepoint Online drive has the following drive items: @@ -840,7 +840,7 @@ Afterwards, we can remove the filtering rule and run an incremental sync. If no Only a *full sync* will include the items previously ignored by the sync rule. [discrete#es-connectors-sharepoint-online-client-sync-rules-limitations-restrictive-changed] -======== Example: Advanced sync rules edge case +*Example: Advanced sync rules edge case* Advanced sync rules can be applied to limit which documents will have content extracted. For example, it's possible to set a rule so that documents older than 180 days won't have content extracted. diff --git a/docs/reference/connector/docs/images/connectors-overview.png b/docs/reference/connector/docs/images/connectors-overview.png deleted file mode 100644 index 4d0edfeb6adae..0000000000000 Binary files a/docs/reference/connector/docs/images/connectors-overview.png and /dev/null differ diff --git a/docs/reference/connector/docs/images/connectors-overview.svg b/docs/reference/connector/docs/images/connectors-overview.svg new file mode 100644 index 0000000000000..0a7fb30c61d6d --- /dev/null +++ b/docs/reference/connector/docs/images/connectors-overview.svg @@ -0,0 +1,70 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/docs/reference/connector/docs/index.asciidoc b/docs/reference/connector/docs/index.asciidoc index 481e124a1a117..dfca45f86ebce 100644 --- a/docs/reference/connector/docs/index.asciidoc +++ b/docs/reference/connector/docs/index.asciidoc @@ -72,7 +72,7 @@ Refer to <> for details. The following diagram provides a high-level overview of the Elastic connectors offering and some key facts. 
-image::connectors-overview.png[align="center",width="100%"] +image::connectors-overview.svg[align="center",width="100%"] [discrete#es-connectors-overview-available-connectors] == Available connectors and feature support diff --git a/docs/reference/docs/bulk.asciidoc b/docs/reference/docs/bulk.asciidoc index 69bf3d1b7db5a..6edccfcdb13f5 100644 --- a/docs/reference/docs/bulk.asciidoc +++ b/docs/reference/docs/bulk.asciidoc @@ -257,6 +257,10 @@ include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=refresh] (Optional, Boolean) If `true`, the request's actions must target an index alias. Defaults to `false`. +`require_data_stream`:: +(Optional, Boolean) If `true`, the request's actions must target a data stream (existing or to-be-created). +Defaults to `false`. + include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=routing] include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=source] diff --git a/docs/reference/esql/esql-async-query-get-api.asciidoc b/docs/reference/esql/esql-async-query-get-api.asciidoc index ec68313b2c490..82a6ae5b28b51 100644 --- a/docs/reference/esql/esql-async-query-get-api.asciidoc +++ b/docs/reference/esql/esql-async-query-get-api.asciidoc @@ -39,6 +39,10 @@ parameter is `true`. [[esql-async-query-get-api-query-params]] ==== {api-query-parms-title} +The API accepts the same parameters as the synchronous +<>, along with the following +parameters: + `wait_for_completion_timeout`:: (Optional, <>) Timeout duration to wait for the request to finish. Defaults to no timeout, diff --git a/docs/reference/esql/functions/description/match.asciidoc b/docs/reference/esql/functions/description/match.asciidoc index 2a27fe4814395..25f0571878d47 100644 --- a/docs/reference/esql/functions/description/match.asciidoc +++ b/docs/reference/esql/functions/description/match.asciidoc @@ -2,4 +2,4 @@ *Description* -Performs a match query on the specified field. Returns true if the provided query matches the row. +Performs a <> on the specified field. Returns true if the provided query matches the row. diff --git a/docs/reference/esql/functions/description/qstr.asciidoc b/docs/reference/esql/functions/description/qstr.asciidoc index 5ce9316405ad2..d9dbe364f607a 100644 --- a/docs/reference/esql/functions/description/qstr.asciidoc +++ b/docs/reference/esql/functions/description/qstr.asciidoc @@ -2,4 +2,4 @@ *Description* -Performs a query string query. Returns true if the provided query string matches the row. +Performs a <>. Returns true if the provided query string matches the row. diff --git a/docs/reference/esql/functions/description/st_envelope.asciidoc b/docs/reference/esql/functions/description/st_envelope.asciidoc new file mode 100644 index 0000000000000..6b7cf8d97538a --- /dev/null +++ b/docs/reference/esql/functions/description/st_envelope.asciidoc @@ -0,0 +1,5 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Description* + +Determines the minimum bounding box of the supplied geometry. diff --git a/docs/reference/esql/functions/description/st_xmax.asciidoc b/docs/reference/esql/functions/description/st_xmax.asciidoc new file mode 100644 index 0000000000000..f33ec590bf2d4 --- /dev/null +++ b/docs/reference/esql/functions/description/st_xmax.asciidoc @@ -0,0 +1,5 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Description* + +Extracts the maximum value of the `x` coordinates from the supplied geometry. 
If the geometry is of type `geo_point` or `geo_shape` this is equivalent to extracting the maximum `longitude` value. diff --git a/docs/reference/esql/functions/description/st_xmin.asciidoc b/docs/reference/esql/functions/description/st_xmin.asciidoc new file mode 100644 index 0000000000000..b06cbfacde7bf --- /dev/null +++ b/docs/reference/esql/functions/description/st_xmin.asciidoc @@ -0,0 +1,5 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Description* + +Extracts the minimum value of the `x` coordinates from the supplied geometry. If the geometry is of type `geo_point` or `geo_shape` this is equivalent to extracting the minimum `longitude` value. diff --git a/docs/reference/esql/functions/description/st_ymax.asciidoc b/docs/reference/esql/functions/description/st_ymax.asciidoc new file mode 100644 index 0000000000000..f9475dd967562 --- /dev/null +++ b/docs/reference/esql/functions/description/st_ymax.asciidoc @@ -0,0 +1,5 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Description* + +Extracts the maximum value of the `y` coordinates from the supplied geometry. If the geometry is of type `geo_point` or `geo_shape` this is equivalent to extracting the maximum `latitude` value. diff --git a/docs/reference/esql/functions/description/st_ymin.asciidoc b/docs/reference/esql/functions/description/st_ymin.asciidoc new file mode 100644 index 0000000000000..7228c63a16030 --- /dev/null +++ b/docs/reference/esql/functions/description/st_ymin.asciidoc @@ -0,0 +1,5 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Description* + +Extracts the minimum value of the `y` coordinates from the supplied geometry. If the geometry is of type `geo_point` or `geo_shape` this is equivalent to extracting the minimum `latitude` value. diff --git a/docs/reference/esql/functions/description/term.asciidoc b/docs/reference/esql/functions/description/term.asciidoc new file mode 100644 index 0000000000000..c43aeb25a0ef7 --- /dev/null +++ b/docs/reference/esql/functions/description/term.asciidoc @@ -0,0 +1,5 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Description* + +Performs a Term query on the specified field. Returns true if the provided term matches the row. diff --git a/docs/reference/esql/functions/examples/st_envelope.asciidoc b/docs/reference/esql/functions/examples/st_envelope.asciidoc new file mode 100644 index 0000000000000..df8c0ad5607fa --- /dev/null +++ b/docs/reference/esql/functions/examples/st_envelope.asciidoc @@ -0,0 +1,13 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Example* + +[source.merge.styled,esql] +---- +include::{esql-specs}/spatial_shapes.csv-spec[tag=st_envelope] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/spatial_shapes.csv-spec[tag=st_envelope-result] +|=== + diff --git a/docs/reference/esql/functions/examples/st_xmax.asciidoc b/docs/reference/esql/functions/examples/st_xmax.asciidoc new file mode 100644 index 0000000000000..5bba1761cf29c --- /dev/null +++ b/docs/reference/esql/functions/examples/st_xmax.asciidoc @@ -0,0 +1,13 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. 
+ +*Example* + +[source.merge.styled,esql] +---- +include::{esql-specs}/spatial_shapes.csv-spec[tag=st_x_y_min_max] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/spatial_shapes.csv-spec[tag=st_x_y_min_max-result] +|=== + diff --git a/docs/reference/esql/functions/examples/st_xmin.asciidoc b/docs/reference/esql/functions/examples/st_xmin.asciidoc new file mode 100644 index 0000000000000..5bba1761cf29c --- /dev/null +++ b/docs/reference/esql/functions/examples/st_xmin.asciidoc @@ -0,0 +1,13 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Example* + +[source.merge.styled,esql] +---- +include::{esql-specs}/spatial_shapes.csv-spec[tag=st_x_y_min_max] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/spatial_shapes.csv-spec[tag=st_x_y_min_max-result] +|=== + diff --git a/docs/reference/esql/functions/examples/st_ymax.asciidoc b/docs/reference/esql/functions/examples/st_ymax.asciidoc new file mode 100644 index 0000000000000..5bba1761cf29c --- /dev/null +++ b/docs/reference/esql/functions/examples/st_ymax.asciidoc @@ -0,0 +1,13 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Example* + +[source.merge.styled,esql] +---- +include::{esql-specs}/spatial_shapes.csv-spec[tag=st_x_y_min_max] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/spatial_shapes.csv-spec[tag=st_x_y_min_max-result] +|=== + diff --git a/docs/reference/esql/functions/examples/st_ymin.asciidoc b/docs/reference/esql/functions/examples/st_ymin.asciidoc new file mode 100644 index 0000000000000..5bba1761cf29c --- /dev/null +++ b/docs/reference/esql/functions/examples/st_ymin.asciidoc @@ -0,0 +1,13 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Example* + +[source.merge.styled,esql] +---- +include::{esql-specs}/spatial_shapes.csv-spec[tag=st_x_y_min_max] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/spatial_shapes.csv-spec[tag=st_x_y_min_max-result] +|=== + diff --git a/docs/reference/esql/functions/examples/term.asciidoc b/docs/reference/esql/functions/examples/term.asciidoc new file mode 100644 index 0000000000000..b9d57f366294b --- /dev/null +++ b/docs/reference/esql/functions/examples/term.asciidoc @@ -0,0 +1,13 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Example* + +[source.merge.styled,esql] +---- +include::{esql-specs}/term-function.csv-spec[tag=term-with-field] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/term-function.csv-spec[tag=term-with-field-result] +|=== + diff --git a/docs/reference/esql/functions/kibana/definition/equals.json b/docs/reference/esql/functions/kibana/definition/equals.json index 885d949f4b20f..40f3d54ba597a 100644 --- a/docs/reference/esql/functions/kibana/definition/equals.json +++ b/docs/reference/esql/functions/kibana/definition/equals.json @@ -77,6 +77,42 @@ "variadic" : false, "returnType" : "boolean" }, + { + "params" : [ + { + "name" : "lhs", + "type" : "date", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "date_nanos", + "optional" : false, + "description" : "An expression." 
+ } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "date_nanos", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "date", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, { "params" : [ { diff --git a/docs/reference/esql/functions/kibana/definition/greater_than.json b/docs/reference/esql/functions/kibana/definition/greater_than.json index cf6e30a0a4547..ea2c0fb1212c7 100644 --- a/docs/reference/esql/functions/kibana/definition/greater_than.json +++ b/docs/reference/esql/functions/kibana/definition/greater_than.json @@ -23,6 +23,42 @@ "variadic" : false, "returnType" : "boolean" }, + { + "params" : [ + { + "name" : "lhs", + "type" : "date", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "date_nanos", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "date_nanos", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "date", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, { "params" : [ { diff --git a/docs/reference/esql/functions/kibana/definition/greater_than_or_equal.json b/docs/reference/esql/functions/kibana/definition/greater_than_or_equal.json index 2535c68af6acf..7e1feb37e87b0 100644 --- a/docs/reference/esql/functions/kibana/definition/greater_than_or_equal.json +++ b/docs/reference/esql/functions/kibana/definition/greater_than_or_equal.json @@ -23,6 +23,42 @@ "variadic" : false, "returnType" : "boolean" }, + { + "params" : [ + { + "name" : "lhs", + "type" : "date", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "date_nanos", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "date_nanos", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "date", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, { "params" : [ { diff --git a/docs/reference/esql/functions/kibana/definition/less_than.json b/docs/reference/esql/functions/kibana/definition/less_than.json index a73754d200d46..71aae4d759ecf 100644 --- a/docs/reference/esql/functions/kibana/definition/less_than.json +++ b/docs/reference/esql/functions/kibana/definition/less_than.json @@ -23,6 +23,42 @@ "variadic" : false, "returnType" : "boolean" }, + { + "params" : [ + { + "name" : "lhs", + "type" : "date", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "date_nanos", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "date_nanos", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "date", + "optional" : false, + "description" : "An expression." 
+ } + ], + "variadic" : false, + "returnType" : "boolean" + }, { "params" : [ { diff --git a/docs/reference/esql/functions/kibana/definition/less_than_or_equal.json b/docs/reference/esql/functions/kibana/definition/less_than_or_equal.json index 7af477db32a34..f119b7ab2eb12 100644 --- a/docs/reference/esql/functions/kibana/definition/less_than_or_equal.json +++ b/docs/reference/esql/functions/kibana/definition/less_than_or_equal.json @@ -23,6 +23,42 @@ "variadic" : false, "returnType" : "boolean" }, + { + "params" : [ + { + "name" : "lhs", + "type" : "date", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "date_nanos", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "date_nanos", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "date", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, { "params" : [ { diff --git a/docs/reference/esql/functions/kibana/definition/match.json b/docs/reference/esql/functions/kibana/definition/match.json index 4a5b05a3f501b..7f2a8239cc0d0 100644 --- a/docs/reference/esql/functions/kibana/definition/match.json +++ b/docs/reference/esql/functions/kibana/definition/match.json @@ -2,21 +2,75 @@ "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.", "type" : "eval", "name" : "match", - "description" : "Performs a match query on the specified field. Returns true if the provided query matches the row.", + "description" : "Performs a <> on the specified field. Returns true if the provided query matches the row.", "signatures" : [ { "params" : [ { "name" : "field", + "type" : "boolean", + "optional" : false, + "description" : "Field that the query will target." + }, + { + "name" : "query", + "type" : "boolean", + "optional" : false, + "description" : "Value to find in the provided field." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "field", + "type" : "boolean", + "optional" : false, + "description" : "Field that the query will target." + }, + { + "name" : "query", "type" : "keyword", "optional" : false, + "description" : "Value to find in the provided field." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "field", + "type" : "date", + "optional" : false, + "description" : "Field that the query will target." + }, + { + "name" : "query", + "type" : "date", + "optional" : false, + "description" : "Value to find in the provided field." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "field", + "type" : "date", + "optional" : false, "description" : "Field that the query will target." }, { "name" : "query", "type" : "keyword", "optional" : false, - "description" : "Text you wish to find in the provided field." + "description" : "Value to find in the provided field." } ], "variadic" : false, @@ -26,15 +80,51 @@ "params" : [ { "name" : "field", + "type" : "date_nanos", + "optional" : false, + "description" : "Field that the query will target." + }, + { + "name" : "query", + "type" : "date_nanos", + "optional" : false, + "description" : "Value to find in the provided field." 
+ } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "field", + "type" : "date_nanos", + "optional" : false, + "description" : "Field that the query will target." + }, + { + "name" : "query", "type" : "keyword", "optional" : false, + "description" : "Value to find in the provided field." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "field", + "type" : "double", + "optional" : false, "description" : "Field that the query will target." }, { "name" : "query", - "type" : "text", + "type" : "double", "optional" : false, - "description" : "Text you wish to find in the provided field." + "description" : "Value to find in the provided field." } ], "variadic" : false, @@ -44,7 +134,25 @@ "params" : [ { "name" : "field", - "type" : "text", + "type" : "double", + "optional" : false, + "description" : "Field that the query will target." + }, + { + "name" : "query", + "type" : "integer", + "optional" : false, + "description" : "Value to find in the provided field." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "field", + "type" : "double", "optional" : false, "description" : "Field that the query will target." }, @@ -52,7 +160,7 @@ "name" : "query", "type" : "keyword", "optional" : false, - "description" : "Text you wish to find in the provided field." + "description" : "Value to find in the provided field." } ], "variadic" : false, @@ -62,15 +170,357 @@ "params" : [ { "name" : "field", - "type" : "text", + "type" : "double", + "optional" : false, + "description" : "Field that the query will target." + }, + { + "name" : "query", + "type" : "long", + "optional" : false, + "description" : "Value to find in the provided field." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "field", + "type" : "integer", + "optional" : false, + "description" : "Field that the query will target." + }, + { + "name" : "query", + "type" : "double", + "optional" : false, + "description" : "Value to find in the provided field." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "field", + "type" : "integer", + "optional" : false, + "description" : "Field that the query will target." + }, + { + "name" : "query", + "type" : "integer", + "optional" : false, + "description" : "Value to find in the provided field." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "field", + "type" : "integer", + "optional" : false, + "description" : "Field that the query will target." + }, + { + "name" : "query", + "type" : "keyword", + "optional" : false, + "description" : "Value to find in the provided field." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "field", + "type" : "integer", + "optional" : false, + "description" : "Field that the query will target." + }, + { + "name" : "query", + "type" : "long", + "optional" : false, + "description" : "Value to find in the provided field." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "field", + "type" : "ip", + "optional" : false, + "description" : "Field that the query will target." + }, + { + "name" : "query", + "type" : "ip", + "optional" : false, + "description" : "Value to find in the provided field." 
+ } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "field", + "type" : "ip", + "optional" : false, + "description" : "Field that the query will target." + }, + { + "name" : "query", + "type" : "keyword", + "optional" : false, + "description" : "Value to find in the provided field." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "field", + "type" : "keyword", "optional" : false, "description" : "Field that the query will target." }, { "name" : "query", + "type" : "keyword", + "optional" : false, + "description" : "Value to find in the provided field." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "field", + "type" : "long", + "optional" : false, + "description" : "Field that the query will target." + }, + { + "name" : "query", + "type" : "double", + "optional" : false, + "description" : "Value to find in the provided field." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "field", + "type" : "long", + "optional" : false, + "description" : "Field that the query will target." + }, + { + "name" : "query", + "type" : "integer", + "optional" : false, + "description" : "Value to find in the provided field." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "field", + "type" : "long", + "optional" : false, + "description" : "Field that the query will target." + }, + { + "name" : "query", + "type" : "keyword", + "optional" : false, + "description" : "Value to find in the provided field." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "field", + "type" : "long", + "optional" : false, + "description" : "Field that the query will target." + }, + { + "name" : "query", + "type" : "long", + "optional" : false, + "description" : "Value to find in the provided field." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "field", "type" : "text", "optional" : false, - "description" : "Text you wish to find in the provided field." + "description" : "Field that the query will target." + }, + { + "name" : "query", + "type" : "keyword", + "optional" : false, + "description" : "Value to find in the provided field." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "field", + "type" : "unsigned_long", + "optional" : false, + "description" : "Field that the query will target." + }, + { + "name" : "query", + "type" : "double", + "optional" : false, + "description" : "Value to find in the provided field." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "field", + "type" : "unsigned_long", + "optional" : false, + "description" : "Field that the query will target." + }, + { + "name" : "query", + "type" : "integer", + "optional" : false, + "description" : "Value to find in the provided field." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "field", + "type" : "unsigned_long", + "optional" : false, + "description" : "Field that the query will target." + }, + { + "name" : "query", + "type" : "keyword", + "optional" : false, + "description" : "Value to find in the provided field." 
+ } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "field", + "type" : "unsigned_long", + "optional" : false, + "description" : "Field that the query will target." + }, + { + "name" : "query", + "type" : "long", + "optional" : false, + "description" : "Value to find in the provided field." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "field", + "type" : "unsigned_long", + "optional" : false, + "description" : "Field that the query will target." + }, + { + "name" : "query", + "type" : "unsigned_long", + "optional" : false, + "description" : "Value to find in the provided field." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "field", + "type" : "version", + "optional" : false, + "description" : "Field that the query will target." + }, + { + "name" : "query", + "type" : "keyword", + "optional" : false, + "description" : "Value to find in the provided field." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "field", + "type" : "version", + "optional" : false, + "description" : "Field that the query will target." + }, + { + "name" : "query", + "type" : "version", + "optional" : false, + "description" : "Value to find in the provided field." } ], "variadic" : false, diff --git a/docs/reference/esql/functions/kibana/definition/match_operator.json b/docs/reference/esql/functions/kibana/definition/match_operator.json index 7a0ace6168b59..44233bbddb653 100644 --- a/docs/reference/esql/functions/kibana/definition/match_operator.json +++ b/docs/reference/esql/functions/kibana/definition/match_operator.json @@ -2,21 +2,75 @@ "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.", "type" : "operator", "name" : "match_operator", - "description" : "Performs a match query on the specified field. Returns true if the provided query matches the row.", + "description" : "Performs a <> on the specified field. Returns true if the provided query matches the row.", "signatures" : [ { "params" : [ { "name" : "field", + "type" : "boolean", + "optional" : false, + "description" : "Field that the query will target." + }, + { + "name" : "query", + "type" : "boolean", + "optional" : false, + "description" : "Value to find in the provided field." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "field", + "type" : "boolean", + "optional" : false, + "description" : "Field that the query will target." + }, + { + "name" : "query", "type" : "keyword", "optional" : false, + "description" : "Value to find in the provided field." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "field", + "type" : "date", + "optional" : false, + "description" : "Field that the query will target." + }, + { + "name" : "query", + "type" : "date", + "optional" : false, + "description" : "Value to find in the provided field." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "field", + "type" : "date", + "optional" : false, "description" : "Field that the query will target." }, { "name" : "query", "type" : "keyword", "optional" : false, - "description" : "Text you wish to find in the provided field." + "description" : "Value to find in the provided field." 
} ], "variadic" : false, @@ -26,15 +80,51 @@ "params" : [ { "name" : "field", + "type" : "date_nanos", + "optional" : false, + "description" : "Field that the query will target." + }, + { + "name" : "query", + "type" : "date_nanos", + "optional" : false, + "description" : "Value to find in the provided field." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "field", + "type" : "date_nanos", + "optional" : false, + "description" : "Field that the query will target." + }, + { + "name" : "query", "type" : "keyword", "optional" : false, + "description" : "Value to find in the provided field." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "field", + "type" : "double", + "optional" : false, "description" : "Field that the query will target." }, { "name" : "query", - "type" : "text", + "type" : "double", "optional" : false, - "description" : "Text you wish to find in the provided field." + "description" : "Value to find in the provided field." } ], "variadic" : false, @@ -44,7 +134,25 @@ "params" : [ { "name" : "field", - "type" : "text", + "type" : "double", + "optional" : false, + "description" : "Field that the query will target." + }, + { + "name" : "query", + "type" : "integer", + "optional" : false, + "description" : "Value to find in the provided field." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "field", + "type" : "double", "optional" : false, "description" : "Field that the query will target." }, @@ -52,7 +160,7 @@ "name" : "query", "type" : "keyword", "optional" : false, - "description" : "Text you wish to find in the provided field." + "description" : "Value to find in the provided field." } ], "variadic" : false, @@ -62,15 +170,357 @@ "params" : [ { "name" : "field", - "type" : "text", + "type" : "double", + "optional" : false, + "description" : "Field that the query will target." + }, + { + "name" : "query", + "type" : "long", + "optional" : false, + "description" : "Value to find in the provided field." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "field", + "type" : "integer", + "optional" : false, + "description" : "Field that the query will target." + }, + { + "name" : "query", + "type" : "double", + "optional" : false, + "description" : "Value to find in the provided field." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "field", + "type" : "integer", + "optional" : false, + "description" : "Field that the query will target." + }, + { + "name" : "query", + "type" : "integer", + "optional" : false, + "description" : "Value to find in the provided field." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "field", + "type" : "integer", + "optional" : false, + "description" : "Field that the query will target." + }, + { + "name" : "query", + "type" : "keyword", + "optional" : false, + "description" : "Value to find in the provided field." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "field", + "type" : "integer", + "optional" : false, + "description" : "Field that the query will target." + }, + { + "name" : "query", + "type" : "long", + "optional" : false, + "description" : "Value to find in the provided field." 
+ } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "field", + "type" : "ip", + "optional" : false, + "description" : "Field that the query will target." + }, + { + "name" : "query", + "type" : "ip", + "optional" : false, + "description" : "Value to find in the provided field." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "field", + "type" : "ip", + "optional" : false, + "description" : "Field that the query will target." + }, + { + "name" : "query", + "type" : "keyword", + "optional" : false, + "description" : "Value to find in the provided field." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "field", + "type" : "keyword", "optional" : false, "description" : "Field that the query will target." }, { "name" : "query", + "type" : "keyword", + "optional" : false, + "description" : "Value to find in the provided field." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "field", + "type" : "long", + "optional" : false, + "description" : "Field that the query will target." + }, + { + "name" : "query", + "type" : "double", + "optional" : false, + "description" : "Value to find in the provided field." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "field", + "type" : "long", + "optional" : false, + "description" : "Field that the query will target." + }, + { + "name" : "query", + "type" : "integer", + "optional" : false, + "description" : "Value to find in the provided field." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "field", + "type" : "long", + "optional" : false, + "description" : "Field that the query will target." + }, + { + "name" : "query", + "type" : "keyword", + "optional" : false, + "description" : "Value to find in the provided field." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "field", + "type" : "long", + "optional" : false, + "description" : "Field that the query will target." + }, + { + "name" : "query", + "type" : "long", + "optional" : false, + "description" : "Value to find in the provided field." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "field", "type" : "text", "optional" : false, - "description" : "Text you wish to find in the provided field." + "description" : "Field that the query will target." + }, + { + "name" : "query", + "type" : "keyword", + "optional" : false, + "description" : "Value to find in the provided field." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "field", + "type" : "unsigned_long", + "optional" : false, + "description" : "Field that the query will target." + }, + { + "name" : "query", + "type" : "double", + "optional" : false, + "description" : "Value to find in the provided field." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "field", + "type" : "unsigned_long", + "optional" : false, + "description" : "Field that the query will target." + }, + { + "name" : "query", + "type" : "integer", + "optional" : false, + "description" : "Value to find in the provided field." 
+ } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "field", + "type" : "unsigned_long", + "optional" : false, + "description" : "Field that the query will target." + }, + { + "name" : "query", + "type" : "keyword", + "optional" : false, + "description" : "Value to find in the provided field." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "field", + "type" : "unsigned_long", + "optional" : false, + "description" : "Field that the query will target." + }, + { + "name" : "query", + "type" : "long", + "optional" : false, + "description" : "Value to find in the provided field." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "field", + "type" : "unsigned_long", + "optional" : false, + "description" : "Field that the query will target." + }, + { + "name" : "query", + "type" : "unsigned_long", + "optional" : false, + "description" : "Value to find in the provided field." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "field", + "type" : "version", + "optional" : false, + "description" : "Field that the query will target." + }, + { + "name" : "query", + "type" : "keyword", + "optional" : false, + "description" : "Value to find in the provided field." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "field", + "type" : "version", + "optional" : false, + "description" : "Field that the query will target." + }, + { + "name" : "query", + "type" : "version", + "optional" : false, + "description" : "Value to find in the provided field." } ], "variadic" : false, diff --git a/docs/reference/esql/functions/kibana/definition/not_equals.json b/docs/reference/esql/functions/kibana/definition/not_equals.json index 24f31115cbc37..d35a5b43ec238 100644 --- a/docs/reference/esql/functions/kibana/definition/not_equals.json +++ b/docs/reference/esql/functions/kibana/definition/not_equals.json @@ -77,6 +77,42 @@ "variadic" : false, "returnType" : "boolean" }, + { + "params" : [ + { + "name" : "lhs", + "type" : "date", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "date_nanos", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "date_nanos", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "date", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, { "params" : [ { diff --git a/docs/reference/esql/functions/kibana/definition/qstr.json b/docs/reference/esql/functions/kibana/definition/qstr.json index 76473349a3414..3b091bfe2e13b 100644 --- a/docs/reference/esql/functions/kibana/definition/qstr.json +++ b/docs/reference/esql/functions/kibana/definition/qstr.json @@ -2,7 +2,7 @@ "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.", "type" : "eval", "name" : "qstr", - "description" : "Performs a query string query. Returns true if the provided query string matches the row.", + "description" : "Performs a <>. 
Returns true if the provided query string matches the row.", "signatures" : [ { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/st_envelope.json b/docs/reference/esql/functions/kibana/definition/st_envelope.json new file mode 100644 index 0000000000000..6c00dda265ac7 --- /dev/null +++ b/docs/reference/esql/functions/kibana/definition/st_envelope.json @@ -0,0 +1,61 @@ +{ + "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.", + "type" : "eval", + "name" : "st_envelope", + "description" : "Determines the minimum bounding box of the supplied geometry.", + "signatures" : [ + { + "params" : [ + { + "name" : "geometry", + "type" : "cartesian_point", + "optional" : false, + "description" : "Expression of type `geo_point`, `geo_shape`, `cartesian_point` or `cartesian_shape`. If `null`, the function returns `null`." + } + ], + "variadic" : false, + "returnType" : "cartesian_shape" + }, + { + "params" : [ + { + "name" : "geometry", + "type" : "cartesian_shape", + "optional" : false, + "description" : "Expression of type `geo_point`, `geo_shape`, `cartesian_point` or `cartesian_shape`. If `null`, the function returns `null`." + } + ], + "variadic" : false, + "returnType" : "cartesian_shape" + }, + { + "params" : [ + { + "name" : "geometry", + "type" : "geo_point", + "optional" : false, + "description" : "Expression of type `geo_point`, `geo_shape`, `cartesian_point` or `cartesian_shape`. If `null`, the function returns `null`." + } + ], + "variadic" : false, + "returnType" : "geo_shape" + }, + { + "params" : [ + { + "name" : "geometry", + "type" : "geo_shape", + "optional" : false, + "description" : "Expression of type `geo_point`, `geo_shape`, `cartesian_point` or `cartesian_shape`. If `null`, the function returns `null`." + } + ], + "variadic" : false, + "returnType" : "geo_shape" + } + ], + "examples" : [ + "FROM airport_city_boundaries\n| WHERE abbrev == \"CPH\"\n| EVAL envelope = ST_ENVELOPE(city_boundary)\n| KEEP abbrev, airport, envelope" + ], + "preview" : false, + "snapshot_only" : false +} diff --git a/docs/reference/esql/functions/kibana/definition/st_xmax.json b/docs/reference/esql/functions/kibana/definition/st_xmax.json new file mode 100644 index 0000000000000..7be22617c0992 --- /dev/null +++ b/docs/reference/esql/functions/kibana/definition/st_xmax.json @@ -0,0 +1,61 @@ +{ + "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.", + "type" : "eval", + "name" : "st_xmax", + "description" : "Extracts the maximum value of the `x` coordinates from the supplied geometry.\nIf the geometry is of type `geo_point` or `geo_shape` this is equivalent to extracting the maximum `longitude` value.", + "signatures" : [ + { + "params" : [ + { + "name" : "point", + "type" : "cartesian_point", + "optional" : false, + "description" : "Expression of type `geo_point`, `geo_shape`, `cartesian_point` or `cartesian_shape`. If `null`, the function returns `null`." + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "point", + "type" : "cartesian_shape", + "optional" : false, + "description" : "Expression of type `geo_point`, `geo_shape`, `cartesian_point` or `cartesian_shape`. If `null`, the function returns `null`." 
+ } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "point", + "type" : "geo_point", + "optional" : false, + "description" : "Expression of type `geo_point`, `geo_shape`, `cartesian_point` or `cartesian_shape`. If `null`, the function returns `null`." + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "point", + "type" : "geo_shape", + "optional" : false, + "description" : "Expression of type `geo_point`, `geo_shape`, `cartesian_point` or `cartesian_shape`. If `null`, the function returns `null`." + } + ], + "variadic" : false, + "returnType" : "double" + } + ], + "examples" : [ + "FROM airport_city_boundaries\n| WHERE abbrev == \"CPH\"\n| EVAL envelope = ST_ENVELOPE(city_boundary)\n| EVAL xmin = ST_XMIN(envelope), xmax = ST_XMAX(envelope), ymin = ST_YMIN(envelope), ymax = ST_YMAX(envelope)\n| KEEP abbrev, airport, xmin, xmax, ymin, ymax" + ], + "preview" : false, + "snapshot_only" : false +} diff --git a/docs/reference/esql/functions/kibana/definition/st_xmin.json b/docs/reference/esql/functions/kibana/definition/st_xmin.json new file mode 100644 index 0000000000000..8052fdb861cea --- /dev/null +++ b/docs/reference/esql/functions/kibana/definition/st_xmin.json @@ -0,0 +1,61 @@ +{ + "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.", + "type" : "eval", + "name" : "st_xmin", + "description" : "Extracts the minimum value of the `x` coordinates from the supplied geometry.\nIf the geometry is of type `geo_point` or `geo_shape` this is equivalent to extracting the minimum `longitude` value.", + "signatures" : [ + { + "params" : [ + { + "name" : "point", + "type" : "cartesian_point", + "optional" : false, + "description" : "Expression of type `geo_point`, `geo_shape`, `cartesian_point` or `cartesian_shape`. If `null`, the function returns `null`." + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "point", + "type" : "cartesian_shape", + "optional" : false, + "description" : "Expression of type `geo_point`, `geo_shape`, `cartesian_point` or `cartesian_shape`. If `null`, the function returns `null`." + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "point", + "type" : "geo_point", + "optional" : false, + "description" : "Expression of type `geo_point`, `geo_shape`, `cartesian_point` or `cartesian_shape`. If `null`, the function returns `null`." + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "point", + "type" : "geo_shape", + "optional" : false, + "description" : "Expression of type `geo_point`, `geo_shape`, `cartesian_point` or `cartesian_shape`. If `null`, the function returns `null`." 
+ } + ], + "variadic" : false, + "returnType" : "double" + } + ], + "examples" : [ + "FROM airport_city_boundaries\n| WHERE abbrev == \"CPH\"\n| EVAL envelope = ST_ENVELOPE(city_boundary)\n| EVAL xmin = ST_XMIN(envelope), xmax = ST_XMAX(envelope), ymin = ST_YMIN(envelope), ymax = ST_YMAX(envelope)\n| KEEP abbrev, airport, xmin, xmax, ymin, ymax" + ], + "preview" : false, + "snapshot_only" : false +} diff --git a/docs/reference/esql/functions/kibana/definition/st_ymax.json b/docs/reference/esql/functions/kibana/definition/st_ymax.json new file mode 100644 index 0000000000000..1a53f7388ea56 --- /dev/null +++ b/docs/reference/esql/functions/kibana/definition/st_ymax.json @@ -0,0 +1,61 @@ +{ + "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.", + "type" : "eval", + "name" : "st_ymax", + "description" : "Extracts the maximum value of the `y` coordinates from the supplied geometry.\nIf the geometry is of type `geo_point` or `geo_shape` this is equivalent to extracting the maximum `latitude` value.", + "signatures" : [ + { + "params" : [ + { + "name" : "point", + "type" : "cartesian_point", + "optional" : false, + "description" : "Expression of type `geo_point`, `geo_shape`, `cartesian_point` or `cartesian_shape`. If `null`, the function returns `null`." + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "point", + "type" : "cartesian_shape", + "optional" : false, + "description" : "Expression of type `geo_point`, `geo_shape`, `cartesian_point` or `cartesian_shape`. If `null`, the function returns `null`." + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "point", + "type" : "geo_point", + "optional" : false, + "description" : "Expression of type `geo_point`, `geo_shape`, `cartesian_point` or `cartesian_shape`. If `null`, the function returns `null`." + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "point", + "type" : "geo_shape", + "optional" : false, + "description" : "Expression of type `geo_point`, `geo_shape`, `cartesian_point` or `cartesian_shape`. If `null`, the function returns `null`." + } + ], + "variadic" : false, + "returnType" : "double" + } + ], + "examples" : [ + "FROM airport_city_boundaries\n| WHERE abbrev == \"CPH\"\n| EVAL envelope = ST_ENVELOPE(city_boundary)\n| EVAL xmin = ST_XMIN(envelope), xmax = ST_XMAX(envelope), ymin = ST_YMIN(envelope), ymax = ST_YMAX(envelope)\n| KEEP abbrev, airport, xmin, xmax, ymin, ymax" + ], + "preview" : false, + "snapshot_only" : false +} diff --git a/docs/reference/esql/functions/kibana/definition/st_ymin.json b/docs/reference/esql/functions/kibana/definition/st_ymin.json new file mode 100644 index 0000000000000..e11722a8f9c07 --- /dev/null +++ b/docs/reference/esql/functions/kibana/definition/st_ymin.json @@ -0,0 +1,61 @@ +{ + "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.", + "type" : "eval", + "name" : "st_ymin", + "description" : "Extracts the minimum value of the `y` coordinates from the supplied geometry.\nIf the geometry is of type `geo_point` or `geo_shape` this is equivalent to extracting the minimum `latitude` value.", + "signatures" : [ + { + "params" : [ + { + "name" : "point", + "type" : "cartesian_point", + "optional" : false, + "description" : "Expression of type `geo_point`, `geo_shape`, `cartesian_point` or `cartesian_shape`. 
If `null`, the function returns `null`." + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "point", + "type" : "cartesian_shape", + "optional" : false, + "description" : "Expression of type `geo_point`, `geo_shape`, `cartesian_point` or `cartesian_shape`. If `null`, the function returns `null`." + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "point", + "type" : "geo_point", + "optional" : false, + "description" : "Expression of type `geo_point`, `geo_shape`, `cartesian_point` or `cartesian_shape`. If `null`, the function returns `null`." + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "point", + "type" : "geo_shape", + "optional" : false, + "description" : "Expression of type `geo_point`, `geo_shape`, `cartesian_point` or `cartesian_shape`. If `null`, the function returns `null`." + } + ], + "variadic" : false, + "returnType" : "double" + } + ], + "examples" : [ + "FROM airport_city_boundaries\n| WHERE abbrev == \"CPH\"\n| EVAL envelope = ST_ENVELOPE(city_boundary)\n| EVAL xmin = ST_XMIN(envelope), xmax = ST_XMAX(envelope), ymin = ST_YMIN(envelope), ymax = ST_YMAX(envelope)\n| KEEP abbrev, airport, xmin, xmax, ymin, ymax" + ], + "preview" : false, + "snapshot_only" : false +} diff --git a/docs/reference/esql/functions/kibana/definition/term.json b/docs/reference/esql/functions/kibana/definition/term.json new file mode 100644 index 0000000000000..d8bb61fd596a1 --- /dev/null +++ b/docs/reference/esql/functions/kibana/definition/term.json @@ -0,0 +1,85 @@ +{ + "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.", + "type" : "eval", + "name" : "term", + "description" : "Performs a Term query on the specified field. Returns true if the provided term matches the row.", + "signatures" : [ + { + "params" : [ + { + "name" : "field", + "type" : "keyword", + "optional" : false, + "description" : "Field that the query will target." + }, + { + "name" : "query", + "type" : "keyword", + "optional" : false, + "description" : "Term you wish to find in the provided field." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "field", + "type" : "keyword", + "optional" : false, + "description" : "Field that the query will target." + }, + { + "name" : "query", + "type" : "text", + "optional" : false, + "description" : "Term you wish to find in the provided field." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "field", + "type" : "text", + "optional" : false, + "description" : "Field that the query will target." + }, + { + "name" : "query", + "type" : "keyword", + "optional" : false, + "description" : "Term you wish to find in the provided field." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "field", + "type" : "text", + "optional" : false, + "description" : "Field that the query will target." + }, + { + "name" : "query", + "type" : "text", + "optional" : false, + "description" : "Term you wish to find in the provided field." 
+ } + ], + "variadic" : false, + "returnType" : "boolean" + } + ], + "examples" : [ + "from books \n| where term(author, \"gabriel\") \n| keep book_no, title\n| limit 3;" + ], + "preview" : true, + "snapshot_only" : true +} diff --git a/docs/reference/esql/functions/kibana/docs/match.md b/docs/reference/esql/functions/kibana/docs/match.md index b866637b41b85..adf6de91c90f1 100644 --- a/docs/reference/esql/functions/kibana/docs/match.md +++ b/docs/reference/esql/functions/kibana/docs/match.md @@ -3,7 +3,7 @@ This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../READ --> ### MATCH -Performs a match query on the specified field. Returns true if the provided query matches the row. +Performs a <> on the specified field. Returns true if the provided query matches the row. ``` FROM books diff --git a/docs/reference/esql/functions/kibana/docs/match_operator.md b/docs/reference/esql/functions/kibana/docs/match_operator.md index fda8b24ff76cc..b0b6196798087 100644 --- a/docs/reference/esql/functions/kibana/docs/match_operator.md +++ b/docs/reference/esql/functions/kibana/docs/match_operator.md @@ -3,7 +3,7 @@ This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../READ --> ### MATCH_OPERATOR -Performs a match query on the specified field. Returns true if the provided query matches the row. +Performs a <> on the specified field. Returns true if the provided query matches the row. ``` FROM books diff --git a/docs/reference/esql/functions/kibana/docs/qstr.md b/docs/reference/esql/functions/kibana/docs/qstr.md index 9b5dc3f9a22eb..7df5a2fe08a9d 100644 --- a/docs/reference/esql/functions/kibana/docs/qstr.md +++ b/docs/reference/esql/functions/kibana/docs/qstr.md @@ -3,7 +3,7 @@ This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../READ --> ### QSTR -Performs a query string query. Returns true if the provided query string matches the row. +Performs a <>. Returns true if the provided query string matches the row. ``` FROM books diff --git a/docs/reference/esql/functions/kibana/docs/st_envelope.md b/docs/reference/esql/functions/kibana/docs/st_envelope.md new file mode 100644 index 0000000000000..5f4c3e4809a82 --- /dev/null +++ b/docs/reference/esql/functions/kibana/docs/st_envelope.md @@ -0,0 +1,13 @@ + + +### ST_ENVELOPE +Determines the minimum bounding box of the supplied geometry. + +``` +FROM airport_city_boundaries +| WHERE abbrev == "CPH" +| EVAL envelope = ST_ENVELOPE(city_boundary) +| KEEP abbrev, airport, envelope +``` diff --git a/docs/reference/esql/functions/kibana/docs/st_xmax.md b/docs/reference/esql/functions/kibana/docs/st_xmax.md new file mode 100644 index 0000000000000..bbde89df76fd0 --- /dev/null +++ b/docs/reference/esql/functions/kibana/docs/st_xmax.md @@ -0,0 +1,15 @@ + + +### ST_XMAX +Extracts the maximum value of the `x` coordinates from the supplied geometry. +If the geometry is of type `geo_point` or `geo_shape` this is equivalent to extracting the maximum `longitude` value. 
+ +``` +FROM airport_city_boundaries +| WHERE abbrev == "CPH" +| EVAL envelope = ST_ENVELOPE(city_boundary) +| EVAL xmin = ST_XMIN(envelope), xmax = ST_XMAX(envelope), ymin = ST_YMIN(envelope), ymax = ST_YMAX(envelope) +| KEEP abbrev, airport, xmin, xmax, ymin, ymax +``` diff --git a/docs/reference/esql/functions/kibana/docs/st_xmin.md b/docs/reference/esql/functions/kibana/docs/st_xmin.md new file mode 100644 index 0000000000000..1a6cee7dcfd62 --- /dev/null +++ b/docs/reference/esql/functions/kibana/docs/st_xmin.md @@ -0,0 +1,15 @@ +<!-- +This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. +--> + +### ST_XMIN +Extracts the minimum value of the `x` coordinates from the supplied geometry. +If the geometry is of type `geo_point` or `geo_shape` this is equivalent to extracting the minimum `longitude` value. + +``` +FROM airport_city_boundaries +| WHERE abbrev == "CPH" +| EVAL envelope = ST_ENVELOPE(city_boundary) +| EVAL xmin = ST_XMIN(envelope), xmax = ST_XMAX(envelope), ymin = ST_YMIN(envelope), ymax = ST_YMAX(envelope) +| KEEP abbrev, airport, xmin, xmax, ymin, ymax +``` diff --git a/docs/reference/esql/functions/kibana/docs/st_ymax.md b/docs/reference/esql/functions/kibana/docs/st_ymax.md new file mode 100644 index 0000000000000..61c9b6c288ca5 --- /dev/null +++ b/docs/reference/esql/functions/kibana/docs/st_ymax.md @@ -0,0 +1,15 @@ +<!-- +This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. +--> + +### ST_YMAX +Extracts the maximum value of the `y` coordinates from the supplied geometry. +If the geometry is of type `geo_point` or `geo_shape` this is equivalent to extracting the maximum `latitude` value. + +``` +FROM airport_city_boundaries +| WHERE abbrev == "CPH" +| EVAL envelope = ST_ENVELOPE(city_boundary) +| EVAL xmin = ST_XMIN(envelope), xmax = ST_XMAX(envelope), ymin = ST_YMIN(envelope), ymax = ST_YMAX(envelope) +| KEEP abbrev, airport, xmin, xmax, ymin, ymax +``` diff --git a/docs/reference/esql/functions/kibana/docs/st_ymin.md b/docs/reference/esql/functions/kibana/docs/st_ymin.md new file mode 100644 index 0000000000000..f5817f10f20a5 --- /dev/null +++ b/docs/reference/esql/functions/kibana/docs/st_ymin.md @@ -0,0 +1,15 @@ +<!-- +This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. +--> + +### ST_YMIN +Extracts the minimum value of the `y` coordinates from the supplied geometry. +If the geometry is of type `geo_point` or `geo_shape` this is equivalent to extracting the minimum `latitude` value. + +``` +FROM airport_city_boundaries +| WHERE abbrev == "CPH" +| EVAL envelope = ST_ENVELOPE(city_boundary) +| EVAL xmin = ST_XMIN(envelope), xmax = ST_XMAX(envelope), ymin = ST_YMIN(envelope), ymax = ST_YMAX(envelope) +| KEEP abbrev, airport, xmin, xmax, ymin, ymax +``` diff --git a/docs/reference/esql/functions/kibana/docs/term.md b/docs/reference/esql/functions/kibana/docs/term.md new file mode 100644 index 0000000000000..83e61a949208d --- /dev/null +++ b/docs/reference/esql/functions/kibana/docs/term.md @@ -0,0 +1,13 @@ +<!-- +This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. +--> + +### TERM +Performs a Term query on the specified field. Returns true if the provided term matches the row. + +``` +from books +| where term(author, "gabriel") +| keep book_no, title +| limit 3; +``` diff --git a/docs/reference/esql/functions/layout/st_envelope.asciidoc b/docs/reference/esql/functions/layout/st_envelope.asciidoc new file mode 100644 index 0000000000000..a20d4275e0c9f --- /dev/null +++ b/docs/reference/esql/functions/layout/st_envelope.asciidoc @@ -0,0 +1,15 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.
+ +[discrete] +[[esql-st_envelope]] +=== `ST_ENVELOPE` + +*Syntax* + +[.text-center] +image::esql/functions/signature/st_envelope.svg[Embedded,opts=inline] + +include::../parameters/st_envelope.asciidoc[] +include::../description/st_envelope.asciidoc[] +include::../types/st_envelope.asciidoc[] +include::../examples/st_envelope.asciidoc[] diff --git a/docs/reference/esql/functions/layout/st_xmax.asciidoc b/docs/reference/esql/functions/layout/st_xmax.asciidoc new file mode 100644 index 0000000000000..b0c5e7695521e --- /dev/null +++ b/docs/reference/esql/functions/layout/st_xmax.asciidoc @@ -0,0 +1,15 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +[discrete] +[[esql-st_xmax]] +=== `ST_XMAX` + +*Syntax* + +[.text-center] +image::esql/functions/signature/st_xmax.svg[Embedded,opts=inline] + +include::../parameters/st_xmax.asciidoc[] +include::../description/st_xmax.asciidoc[] +include::../types/st_xmax.asciidoc[] +include::../examples/st_xmax.asciidoc[] diff --git a/docs/reference/esql/functions/layout/st_xmin.asciidoc b/docs/reference/esql/functions/layout/st_xmin.asciidoc new file mode 100644 index 0000000000000..55fbad88c4cf0 --- /dev/null +++ b/docs/reference/esql/functions/layout/st_xmin.asciidoc @@ -0,0 +1,15 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +[discrete] +[[esql-st_xmin]] +=== `ST_XMIN` + +*Syntax* + +[.text-center] +image::esql/functions/signature/st_xmin.svg[Embedded,opts=inline] + +include::../parameters/st_xmin.asciidoc[] +include::../description/st_xmin.asciidoc[] +include::../types/st_xmin.asciidoc[] +include::../examples/st_xmin.asciidoc[] diff --git a/docs/reference/esql/functions/layout/st_ymax.asciidoc b/docs/reference/esql/functions/layout/st_ymax.asciidoc new file mode 100644 index 0000000000000..e1022de4ba664 --- /dev/null +++ b/docs/reference/esql/functions/layout/st_ymax.asciidoc @@ -0,0 +1,15 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +[discrete] +[[esql-st_ymax]] +=== `ST_YMAX` + +*Syntax* + +[.text-center] +image::esql/functions/signature/st_ymax.svg[Embedded,opts=inline] + +include::../parameters/st_ymax.asciidoc[] +include::../description/st_ymax.asciidoc[] +include::../types/st_ymax.asciidoc[] +include::../examples/st_ymax.asciidoc[] diff --git a/docs/reference/esql/functions/layout/st_ymin.asciidoc b/docs/reference/esql/functions/layout/st_ymin.asciidoc new file mode 100644 index 0000000000000..65511e1925e27 --- /dev/null +++ b/docs/reference/esql/functions/layout/st_ymin.asciidoc @@ -0,0 +1,15 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +[discrete] +[[esql-st_ymin]] +=== `ST_YMIN` + +*Syntax* + +[.text-center] +image::esql/functions/signature/st_ymin.svg[Embedded,opts=inline] + +include::../parameters/st_ymin.asciidoc[] +include::../description/st_ymin.asciidoc[] +include::../types/st_ymin.asciidoc[] +include::../examples/st_ymin.asciidoc[] diff --git a/docs/reference/esql/functions/layout/term.asciidoc b/docs/reference/esql/functions/layout/term.asciidoc new file mode 100644 index 0000000000000..1fe94491bed04 --- /dev/null +++ b/docs/reference/esql/functions/layout/term.asciidoc @@ -0,0 +1,17 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. 
+ +[discrete] +[[esql-term]] +=== `TERM` + +preview::["Do not use on production environments. This functionality is in technical preview and may be changed or removed in a future release. Elastic will work to fix any issues, but features in technical preview are not subject to the support SLA of official GA features."] + +*Syntax* + +[.text-center] +image::esql/functions/signature/term.svg[Embedded,opts=inline] + +include::../parameters/term.asciidoc[] +include::../description/term.asciidoc[] +include::../types/term.asciidoc[] +include::../examples/term.asciidoc[] diff --git a/docs/reference/esql/functions/parameters/match.asciidoc b/docs/reference/esql/functions/parameters/match.asciidoc index f18adb28cd20c..46f6acad9e128 100644 --- a/docs/reference/esql/functions/parameters/match.asciidoc +++ b/docs/reference/esql/functions/parameters/match.asciidoc @@ -6,4 +6,4 @@ Field that the query will target. `query`:: -Text you wish to find in the provided field. +Value to find in the provided field. diff --git a/docs/reference/esql/functions/parameters/st_envelope.asciidoc b/docs/reference/esql/functions/parameters/st_envelope.asciidoc new file mode 100644 index 0000000000000..a31c6a85de367 --- /dev/null +++ b/docs/reference/esql/functions/parameters/st_envelope.asciidoc @@ -0,0 +1,6 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Parameters* + +`geometry`:: +Expression of type `geo_point`, `geo_shape`, `cartesian_point` or `cartesian_shape`. If `null`, the function returns `null`. diff --git a/docs/reference/esql/functions/parameters/st_xmax.asciidoc b/docs/reference/esql/functions/parameters/st_xmax.asciidoc new file mode 100644 index 0000000000000..788f3485af297 --- /dev/null +++ b/docs/reference/esql/functions/parameters/st_xmax.asciidoc @@ -0,0 +1,6 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Parameters* + +`point`:: +Expression of type `geo_point`, `geo_shape`, `cartesian_point` or `cartesian_shape`. If `null`, the function returns `null`. diff --git a/docs/reference/esql/functions/parameters/st_xmin.asciidoc b/docs/reference/esql/functions/parameters/st_xmin.asciidoc new file mode 100644 index 0000000000000..788f3485af297 --- /dev/null +++ b/docs/reference/esql/functions/parameters/st_xmin.asciidoc @@ -0,0 +1,6 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Parameters* + +`point`:: +Expression of type `geo_point`, `geo_shape`, `cartesian_point` or `cartesian_shape`. If `null`, the function returns `null`. diff --git a/docs/reference/esql/functions/parameters/st_ymax.asciidoc b/docs/reference/esql/functions/parameters/st_ymax.asciidoc new file mode 100644 index 0000000000000..788f3485af297 --- /dev/null +++ b/docs/reference/esql/functions/parameters/st_ymax.asciidoc @@ -0,0 +1,6 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Parameters* + +`point`:: +Expression of type `geo_point`, `geo_shape`, `cartesian_point` or `cartesian_shape`. If `null`, the function returns `null`. 
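Taken together, `ST_ENVELOPE` and the four extent functions documented above recover the bounding box of any geometry. A minimal sketch of those semantics, assuming only the existing `ROW` source command and `TO_GEOSHAPE` conversion function (neither is part of this change) and an illustrative WKT literal:

```
ROW wkt = "POLYGON ((10 40, 20 45, 15 50, 10 40))"
| EVAL geom = TO_GEOSHAPE(wkt)
| EVAL env = ST_ENVELOPE(geom)
| EVAL xmin = ST_XMIN(env), xmax = ST_XMAX(env), ymin = ST_YMIN(env), ymax = ST_YMAX(env)
| KEEP xmin, xmax, ymin, ymax
```

The polygon's longitudes span [10, 20] and its latitudes span [40, 50], so the expected output row is xmin = 10.0, xmax = 20.0, ymin = 40.0, ymax = 50.0.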
diff --git a/docs/reference/esql/functions/parameters/st_ymin.asciidoc b/docs/reference/esql/functions/parameters/st_ymin.asciidoc new file mode 100644 index 0000000000000..788f3485af297 --- /dev/null +++ b/docs/reference/esql/functions/parameters/st_ymin.asciidoc @@ -0,0 +1,6 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Parameters* + +`point`:: +Expression of type `geo_point`, `geo_shape`, `cartesian_point` or `cartesian_shape`. If `null`, the function returns `null`. diff --git a/docs/reference/esql/functions/parameters/term.asciidoc b/docs/reference/esql/functions/parameters/term.asciidoc new file mode 100644 index 0000000000000..edba8625d04c5 --- /dev/null +++ b/docs/reference/esql/functions/parameters/term.asciidoc @@ -0,0 +1,9 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Parameters* + +`field`:: +Field that the query will target. + +`query`:: +Term you wish to find in the provided field. diff --git a/docs/reference/esql/functions/search-functions.asciidoc b/docs/reference/esql/functions/search-functions.asciidoc index 943a262497d4c..238813c382c8c 100644 --- a/docs/reference/esql/functions/search-functions.asciidoc +++ b/docs/reference/esql/functions/search-functions.asciidoc @@ -5,6 +5,14 @@ Full-text Search functions ++++ +Full text functions are used to search for text in fields. +<<analysis,Text analysis>> is used to analyze the query before it is searched. + +Full text functions can be used to match <<esql-multivalued-fields,multivalued fields>>. +A multivalued field that contains a value that matches a full text query is considered to match the query. + +See <<esql-limitations-full-text-search,full text search limitations>> for information on the limitations of full text search. + {esql} supports these full-text search functions: // tag::search_list[] diff --git a/docs/reference/esql/functions/search.asciidoc b/docs/reference/esql/functions/search.asciidoc index ae1b003b65abb..ba399ead8adfc 100644 --- a/docs/reference/esql/functions/search.asciidoc +++ b/docs/reference/esql/functions/search.asciidoc @@ -6,7 +6,10 @@ The only search operator is match (`:`). preview::["Do not use on production environments. This functionality is in technical preview and may be changed or removed in a future release. Elastic will work to fix any issues, but features in technical preview are not subject to the support SLA of official GA features."] -The match operator performs a <<query-dsl-match-query,match query>> on the specified field. Returns true if the provided query matches the row. +The match operator performs a <<query-dsl-match-query,match query>> on the specified field. +Returns true if the provided query matches the row. + +The match operator is equivalent to the <<esql-match,match function>>.
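For illustration, a hedged sketch of the operator form of the query used in the function docs above (the `books` index and `author` field are reused from those examples):

```
FROM books
| WHERE author : "gabriel"
| KEEP book_no, title
| LIMIT 3
```

Given the widened type table in this change, the same `field : query` shape should also apply to non-text fields, for example an `integer` field matched against an integer literal.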
[.text-center] image::esql/functions/signature/match_operator.svg[Embedded,opts=inline] diff --git a/docs/reference/esql/functions/signature/st_envelope.svg b/docs/reference/esql/functions/signature/st_envelope.svg new file mode 100644 index 0000000000000..885a60e6fd86f --- /dev/null +++ b/docs/reference/esql/functions/signature/st_envelope.svg @@ -0,0 +1 @@ +ST_ENVELOPE(geometry) \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/st_xmax.svg b/docs/reference/esql/functions/signature/st_xmax.svg new file mode 100644 index 0000000000000..348d5a7f72763 --- /dev/null +++ b/docs/reference/esql/functions/signature/st_xmax.svg @@ -0,0 +1 @@ +ST_XMAX(point) \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/st_xmin.svg b/docs/reference/esql/functions/signature/st_xmin.svg new file mode 100644 index 0000000000000..13d479b0458be --- /dev/null +++ b/docs/reference/esql/functions/signature/st_xmin.svg @@ -0,0 +1 @@ +ST_XMIN(point) \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/st_ymax.svg b/docs/reference/esql/functions/signature/st_ymax.svg new file mode 100644 index 0000000000000..e6ecb00185c84 --- /dev/null +++ b/docs/reference/esql/functions/signature/st_ymax.svg @@ -0,0 +1 @@ +ST_YMAX(point) \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/st_ymin.svg b/docs/reference/esql/functions/signature/st_ymin.svg new file mode 100644 index 0000000000000..ae722f1edc3d4 --- /dev/null +++ b/docs/reference/esql/functions/signature/st_ymin.svg @@ -0,0 +1 @@ +ST_YMIN(point) \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/term.svg b/docs/reference/esql/functions/signature/term.svg new file mode 100644 index 0000000000000..955dd7fa215ab --- /dev/null +++ b/docs/reference/esql/functions/signature/term.svg @@ -0,0 +1 @@ +TERM(field,query) \ No newline at end of file diff --git a/docs/reference/esql/functions/spatial-functions.asciidoc b/docs/reference/esql/functions/spatial-functions.asciidoc index eee44d337b4c6..c6a8467b39996 100644 --- a/docs/reference/esql/functions/spatial-functions.asciidoc +++ b/docs/reference/esql/functions/spatial-functions.asciidoc @@ -15,6 +15,11 @@ * <> * <> * <> +* experimental:[] <> +* experimental:[] <> +* experimental:[] <> +* experimental:[] <> +* experimental:[] <> // end::spatial_list[] include::layout/st_distance.asciidoc[] @@ -24,3 +29,8 @@ include::layout/st_contains.asciidoc[] include::layout/st_within.asciidoc[] include::layout/st_x.asciidoc[] include::layout/st_y.asciidoc[] +include::layout/st_envelope.asciidoc[] +include::layout/st_xmax.asciidoc[] +include::layout/st_xmin.asciidoc[] +include::layout/st_ymax.asciidoc[] +include::layout/st_ymin.asciidoc[] diff --git a/docs/reference/esql/functions/types/equals.asciidoc b/docs/reference/esql/functions/types/equals.asciidoc index 8d48b7ebf084a..1bb8bf2122b35 100644 --- a/docs/reference/esql/functions/types/equals.asciidoc +++ b/docs/reference/esql/functions/types/equals.asciidoc @@ -9,6 +9,8 @@ boolean | boolean | boolean cartesian_point | cartesian_point | boolean cartesian_shape | cartesian_shape | boolean date | date | boolean +date | date_nanos | boolean +date_nanos | date | boolean date_nanos | date_nanos | boolean double | double | boolean double | integer | boolean diff --git a/docs/reference/esql/functions/types/greater_than.asciidoc b/docs/reference/esql/functions/types/greater_than.asciidoc index 8000fd34c8507..39253ac445f42 100644 --- 
a/docs/reference/esql/functions/types/greater_than.asciidoc +++ b/docs/reference/esql/functions/types/greater_than.asciidoc @@ -6,6 +6,8 @@ |=== lhs | rhs | result date | date | boolean +date | date_nanos | boolean +date_nanos | date | boolean date_nanos | date_nanos | boolean double | double | boolean double | integer | boolean diff --git a/docs/reference/esql/functions/types/greater_than_or_equal.asciidoc b/docs/reference/esql/functions/types/greater_than_or_equal.asciidoc index 8000fd34c8507..39253ac445f42 100644 --- a/docs/reference/esql/functions/types/greater_than_or_equal.asciidoc +++ b/docs/reference/esql/functions/types/greater_than_or_equal.asciidoc @@ -6,6 +6,8 @@ |=== lhs | rhs | result date | date | boolean +date | date_nanos | boolean +date_nanos | date | boolean date_nanos | date_nanos | boolean double | double | boolean double | integer | boolean diff --git a/docs/reference/esql/functions/types/less_than.asciidoc b/docs/reference/esql/functions/types/less_than.asciidoc index 8000fd34c8507..39253ac445f42 100644 --- a/docs/reference/esql/functions/types/less_than.asciidoc +++ b/docs/reference/esql/functions/types/less_than.asciidoc @@ -6,6 +6,8 @@ |=== lhs | rhs | result date | date | boolean +date | date_nanos | boolean +date_nanos | date | boolean date_nanos | date_nanos | boolean double | double | boolean double | integer | boolean diff --git a/docs/reference/esql/functions/types/less_than_or_equal.asciidoc b/docs/reference/esql/functions/types/less_than_or_equal.asciidoc index 8000fd34c8507..39253ac445f42 100644 --- a/docs/reference/esql/functions/types/less_than_or_equal.asciidoc +++ b/docs/reference/esql/functions/types/less_than_or_equal.asciidoc @@ -6,6 +6,8 @@ |=== lhs | rhs | result date | date | boolean +date | date_nanos | boolean +date_nanos | date | boolean date_nanos | date_nanos | boolean double | double | boolean double | integer | boolean diff --git a/docs/reference/esql/functions/types/match.asciidoc b/docs/reference/esql/functions/types/match.asciidoc index 7523b29c62b1d..402277af44749 100644 --- a/docs/reference/esql/functions/types/match.asciidoc +++ b/docs/reference/esql/functions/types/match.asciidoc @@ -5,8 +5,33 @@ [%header.monospaced.styled,format=dsv,separator=|] |=== field | query | result +boolean | boolean | boolean +boolean | keyword | boolean +date | date | boolean +date | keyword | boolean +date_nanos | date_nanos | boolean +date_nanos | keyword | boolean +double | double | boolean +double | integer | boolean +double | keyword | boolean +double | long | boolean +integer | double | boolean +integer | integer | boolean +integer | keyword | boolean +integer | long | boolean +ip | ip | boolean +ip | keyword | boolean keyword | keyword | boolean -keyword | text | boolean +long | double | boolean +long | integer | boolean +long | keyword | boolean +long | long | boolean text | keyword | boolean -text | text | boolean +unsigned_long | double | boolean +unsigned_long | integer | boolean +unsigned_long | keyword | boolean +unsigned_long | long | boolean +unsigned_long | unsigned_long | boolean +version | keyword | boolean +version | version | boolean |=== diff --git a/docs/reference/esql/functions/types/match_operator.asciidoc b/docs/reference/esql/functions/types/match_operator.asciidoc index 7523b29c62b1d..402277af44749 100644 --- a/docs/reference/esql/functions/types/match_operator.asciidoc +++ b/docs/reference/esql/functions/types/match_operator.asciidoc @@ -5,8 +5,33 @@ [%header.monospaced.styled,format=dsv,separator=|] |=== field | query | 
result +boolean | boolean | boolean +boolean | keyword | boolean +date | date | boolean +date | keyword | boolean +date_nanos | date_nanos | boolean +date_nanos | keyword | boolean +double | double | boolean +double | integer | boolean +double | keyword | boolean +double | long | boolean +integer | double | boolean +integer | integer | boolean +integer | keyword | boolean +integer | long | boolean +ip | ip | boolean +ip | keyword | boolean keyword | keyword | boolean -keyword | text | boolean +long | double | boolean +long | integer | boolean +long | keyword | boolean +long | long | boolean text | keyword | boolean -text | text | boolean +unsigned_long | double | boolean +unsigned_long | integer | boolean +unsigned_long | keyword | boolean +unsigned_long | long | boolean +unsigned_long | unsigned_long | boolean +version | keyword | boolean +version | version | boolean |=== diff --git a/docs/reference/esql/functions/types/not_equals.asciidoc b/docs/reference/esql/functions/types/not_equals.asciidoc index 8d48b7ebf084a..1bb8bf2122b35 100644 --- a/docs/reference/esql/functions/types/not_equals.asciidoc +++ b/docs/reference/esql/functions/types/not_equals.asciidoc @@ -9,6 +9,8 @@ boolean | boolean | boolean cartesian_point | cartesian_point | boolean cartesian_shape | cartesian_shape | boolean date | date | boolean +date | date_nanos | boolean +date_nanos | date | boolean date_nanos | date_nanos | boolean double | double | boolean double | integer | boolean diff --git a/docs/reference/esql/functions/types/st_envelope.asciidoc b/docs/reference/esql/functions/types/st_envelope.asciidoc new file mode 100644 index 0000000000000..43355394c6015 --- /dev/null +++ b/docs/reference/esql/functions/types/st_envelope.asciidoc @@ -0,0 +1,12 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it. + +*Supported types* + +[%header.monospaced.styled,format=dsv,separator=|] +|=== +geometry | result +cartesian_point | cartesian_shape +cartesian_shape | cartesian_shape +geo_point | geo_shape +geo_shape | geo_shape +|=== diff --git a/docs/reference/esql/functions/types/st_xmax.asciidoc b/docs/reference/esql/functions/types/st_xmax.asciidoc new file mode 100644 index 0000000000000..418c5cafae6f3 --- /dev/null +++ b/docs/reference/esql/functions/types/st_xmax.asciidoc @@ -0,0 +1,12 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it. + +*Supported types* + +[%header.monospaced.styled,format=dsv,separator=|] +|=== +point | result +cartesian_point | double +cartesian_shape | double +geo_point | double +geo_shape | double +|=== diff --git a/docs/reference/esql/functions/types/st_xmin.asciidoc b/docs/reference/esql/functions/types/st_xmin.asciidoc new file mode 100644 index 0000000000000..418c5cafae6f3 --- /dev/null +++ b/docs/reference/esql/functions/types/st_xmin.asciidoc @@ -0,0 +1,12 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it.
+ +*Supported types* + +[%header.monospaced.styled,format=dsv,separator=|] +|=== +point | result +cartesian_point | double +cartesian_shape | double +geo_point | double +geo_shape | double +|=== diff --git a/docs/reference/esql/functions/types/st_ymax.asciidoc b/docs/reference/esql/functions/types/st_ymax.asciidoc new file mode 100644 index 0000000000000..418c5cafae6f3 --- /dev/null +++ b/docs/reference/esql/functions/types/st_ymax.asciidoc @@ -0,0 +1,12 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it. + +*Supported types* + +[%header.monospaced.styled,format=dsv,separator=|] +|=== +point | result +cartesian_point | double +cartesian_shape | double +geo_point | double +geo_shape | double +|=== diff --git a/docs/reference/esql/functions/types/st_ymin.asciidoc b/docs/reference/esql/functions/types/st_ymin.asciidoc new file mode 100644 index 0000000000000..418c5cafae6f3 --- /dev/null +++ b/docs/reference/esql/functions/types/st_ymin.asciidoc @@ -0,0 +1,12 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it. + +*Supported types* + +[%header.monospaced.styled,format=dsv,separator=|] +|=== +point | result +cartesian_point | double +cartesian_shape | double +geo_point | double +geo_shape | double +|=== diff --git a/docs/reference/esql/functions/types/term.asciidoc b/docs/reference/esql/functions/types/term.asciidoc new file mode 100644 index 0000000000000..7523b29c62b1d --- /dev/null +++ b/docs/reference/esql/functions/types/term.asciidoc @@ -0,0 +1,12 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it. + +*Supported types* + +[%header.monospaced.styled,format=dsv,separator=|] +|=== +field | query | result +keyword | keyword | boolean +keyword | text | boolean +text | keyword | boolean +text | text | boolean +|=== diff --git a/docs/reference/indices/alias-exists.asciidoc b/docs/reference/indices/alias-exists.asciidoc index d7b3454dcff56..a514d36a1bfef 100644 --- a/docs/reference/indices/alias-exists.asciidoc +++ b/docs/reference/indices/alias-exists.asciidoc @@ -52,6 +52,8 @@ Defaults to `all`. (Optional, Boolean) If `false`, requests that include a missing data stream or index in the `` return an error. Defaults to `false`. +include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=master-timeout] + [[alias-exists-api-response-codes]] ==== {api-response-codes-title} diff --git a/docs/reference/indices/get-alias.asciidoc b/docs/reference/indices/get-alias.asciidoc index 41d62fb70e01b..d4c5b92116949 100644 --- a/docs/reference/indices/get-alias.asciidoc +++ b/docs/reference/indices/get-alias.asciidoc @@ -58,3 +58,5 @@ Defaults to `all`. `ignore_unavailable`:: (Optional, Boolean) If `false`, requests that include a missing data stream or index in the `` return an error. Defaults to `false`. + +include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=master-timeout] diff --git a/docs/reference/inference/inference-apis.asciidoc b/docs/reference/inference/inference-apis.asciidoc index 037d7abeb2a36..c7b779a994a05 100644 --- a/docs/reference/inference/inference-apis.asciidoc +++ b/docs/reference/inference/inference-apis.asciidoc @@ -35,6 +35,19 @@ Elastic –, then create an {infer} endpoint by the <>. Now use <> to perform <> on your data.
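As a concrete sketch of that flow, the following requests create an endpoint and then run {infer} against it; the endpoint ID and input text are illustrative, and the `elasticsearch` service with the built-in ELSER model is just one of the available options:

[source,console]
----
PUT _inference/sparse_embedding/my-inference-endpoint
{
  "service": "elasticsearch",
  "service_settings": {
    "num_allocations": 1,
    "num_threads": 1,
    "model_id": ".elser_model_2"
  }
}

POST _inference/sparse_embedding/my-inference-endpoint
{
  "input": "The quick brown fox jumps over the lazy dog"
}
----
// TEST[skip: illustrative sketch; requires a running {ml} node]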
+[discrete] +[[adaptive-allocations]] +=== Adaptive allocations + +Adaptive allocations allow inference services to dynamically adjust the number of model allocations based on the current load. + +When adaptive allocations are enabled: + +- The number of allocations scales up automatically when the load increases. +- Allocations scale down to a minimum of 0 when the load decreases, saving resources. + +For more information about adaptive allocations and resources, refer to the {ml-docs}/ml-nlp-auto-scale.html[trained model autoscaling] documentation. + //[discrete] //[[default-enpoints]] //=== Default {infer} endpoints diff --git a/docs/reference/inference/put-inference.asciidoc b/docs/reference/inference/put-inference.asciidoc index e7e25ec98b49d..4f82889f562d8 100644 --- a/docs/reference/inference/put-inference.asciidoc +++ b/docs/reference/inference/put-inference.asciidoc @@ -10,7 +10,6 @@ Creates an {infer} endpoint to perform an {infer} task. * For built-in models and models uploaded through Eland, the {infer} APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the {infer} APIs to use these models or if you want to use non-NLP models, use the <>. ==== - [discrete] [[put-inference-api-request]] ==== {api-request-title} @@ -47,6 +46,14 @@ Refer to the service list in the <> API. In the response, look for `"state": "fully_allocated"` and ensure the `"allocation_count"` matches the `"target_allocation_count"`. +* Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. +==== + + The following services are available through the {infer} API. You can find the available task types next to the service name. Click the links to review the configuration details of the services: @@ -67,4 +74,17 @@ Click the links to review the configuration details of the services: * <> (`text_embedding`) The {es} and ELSER services run on a {ml} node in your {es} cluster. The rest of -the services connect to external providers. \ No newline at end of file +the services connect to external providers. + +[discrete] +[[adaptive-allocations-put-inference]] +==== Adaptive allocations + +Adaptive allocations allow inference services to dynamically adjust the number of model allocations based on the current load. + +When adaptive allocations are enabled: + +- The number of allocations scales up automatically when the load increases. +- Allocations scale down to a minimum of 0 when the load decreases, saving resources. + +For more information about adaptive allocations and resources, refer to the {ml-docs}/ml-nlp-auto-scale.html[trained model autoscaling] documentation. \ No newline at end of file diff --git a/docs/reference/inference/service-openai.asciidoc b/docs/reference/inference/service-openai.asciidoc index 21643133553e1..9211e2d08e88b 100644 --- a/docs/reference/inference/service-openai.asciidoc +++ b/docs/reference/inference/service-openai.asciidoc @@ -76,6 +76,12 @@ https://platform.openai.com/api-keys[API keys section]. include::inference-shared.asciidoc[tag=api-key-admonition] -- +`dimensions`::: +(Optional, integer) +The number of dimensions the resulting output embeddings should have. +Only supported in `text-embedding-3` and later models. +If not set, the OpenAI-defined default for the model is used. + `model_id`::: (Required, string) The name of the model to use for the {infer} task. @@ -134,8 +140,8 @@ Specifies the user issuing the request, which can be used for abuse detection.
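Tying the adaptive allocations sections above to a request, here is a minimal sketch of enabling them when creating an endpoint; the endpoint ID, the allocation bounds, and the choice of the `elasticsearch` service with ELSER are illustrative assumptions:

[source,console]
----
PUT _inference/sparse_embedding/my-adaptive-endpoint
{
  "service": "elasticsearch",
  "service_settings": {
    "adaptive_allocations": {
      "enabled": true,
      "min_number_of_allocations": 0,
      "max_number_of_allocations": 4
    },
    "num_threads": 1,
    "model_id": ".elser_model_2"
  }
}
----
// TEST[skip: illustrative sketch]

With `min_number_of_allocations` set to `0`, the deployment can scale all the way down when idle, at the cost of waiting for an allocation to start on the next request.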
[[inference-example-openai]] ==== OpenAI service example -The following example shows how to create an {infer} endpoint called -`openai-embeddings` to perform a `text_embedding` task type. +The following example shows how to create an {infer} endpoint called `openai-embeddings` to perform a `text_embedding` task type. +The embeddings created by requests to this endpoint will have 128 dimensions. [source,console] ------------------------------------------------------------ @@ -144,14 +150,14 @@ PUT _inference/text_embedding/openai-embeddings "service": "openai", "service_settings": { "api_key": "", - "model_id": "text-embedding-ada-002" + "model_id": "text-embedding-3-small", + "dimensions": 128 } } ------------------------------------------------------------ // TEST[skip:TBD] -The next example shows how to create an {infer} endpoint called -`openai-completion` to perform a `completion` task type. +The next example shows how to create an {infer} endpoint called `openai-completion` to perform a `completion` task type. [source,console] ------------------------------------------------------------ diff --git a/docs/reference/ingest/processors/attachment.asciidoc b/docs/reference/ingest/processors/attachment.asciidoc index fd2866906c1d0..bd5b8db562ae2 100644 --- a/docs/reference/ingest/processors/attachment.asciidoc +++ b/docs/reference/ingest/processors/attachment.asciidoc @@ -19,15 +19,15 @@ representation. The processor will skip the base64 decoding then. .Attachment options [options="header"] |====== -| Name | Required | Default | Description -| `field` | yes | - | The field to get the base64 encoded field from -| `target_field` | no | attachment | The field that will hold the attachment information -| `indexed_chars` | no | 100000 | The number of chars being used for extraction to prevent huge fields. Use `-1` for no limit. -| `indexed_chars_field` | no | `null` | Field name from which you can overwrite the number of chars being used for extraction. See `indexed_chars`. -| `properties` | no | all properties | Array of properties to select to be stored. Can be `content`, `title`, `name`, `author`, `keywords`, `date`, `content_type`, `content_length`, `language` -| `ignore_missing` | no | `false` | If `true` and `field` does not exist, the processor quietly exits without modifying the document -| `remove_binary` | no | `false` | If `true`, the binary `field` will be removed from the document -| `resource_name` | no | | Field containing the name of the resource to decode. If specified, the processor passes this resource name to the underlying Tika library to enable https://tika.apache.org/1.24.1/detection.html#Resource_Name_Based_Detection[Resource Name Based Detection]. +| Name | Required | Default | Description +| `field` | yes | - | The field to get the base64 encoded field from +| `target_field` | no | attachment | The field that will hold the attachment information +| `indexed_chars` | no | 100000 | The number of chars being used for extraction to prevent huge fields. Use `-1` for no limit. +| `indexed_chars_field` | no | `null` | Field name from which you can overwrite the number of chars being used for extraction. See `indexed_chars`. +| `properties` | no | all properties | Array of properties to select to be stored. 
Can be `content`, `title`, `name`, `author`, `keywords`, `date`, `content_type`, `content_length`, `language` +| `ignore_missing` | no | `false` | If `true` and `field` does not exist, the processor quietly exits without modifying the document +| `remove_binary` | encouraged | `false` | If `true`, the binary `field` will be removed from the document. This option is not required, but setting it explicitly is encouraged, and omitting it will result in a warning. +| `resource_name` | no | | Field containing the name of the resource to decode. If specified, the processor passes this resource name to the underlying Tika library to enable https://tika.apache.org/1.24.1/detection.html#Resource_Name_Based_Detection[Resource Name Based Detection]. |====== [discrete] @@ -58,7 +58,7 @@ PUT _ingest/pipeline/attachment { "attachment" : { "field" : "data", - "remove_binary": false + "remove_binary": true } } ] @@ -82,7 +82,6 @@ The document's `attachment` object contains extracted properties for the file: "_seq_no": 22, "_primary_term": 1, "_source": { - "data": "e1xydGYxXGFuc2kNCkxvcmVtIGlwc3VtIGRvbG9yIHNpdCBhbWV0DQpccGFyIH0=", "attachment": { "content_type": "application/rtf", "language": "ro", @@ -94,9 +93,6 @@ The document's `attachment` object contains extracted properties for the file: ---- // TESTRESPONSE[s/"_seq_no": \d+/"_seq_no" : $body._seq_no/ s/"_primary_term" : 1/"_primary_term" : $body._primary_term/] -NOTE: Keeping the binary as a field within the document might consume a lot of resources. It is highly recommended - to remove that field from the document. Set `remove_binary` to `true` to automatically remove the field. - [[attachment-fields]] ==== Exported fields @@ -143,7 +139,7 @@ PUT _ingest/pipeline/attachment "attachment" : { "field" : "data", "properties": [ "content", "title" ], - "remove_binary": false + "remove_binary": true } } ] @@ -154,6 +150,59 @@ NOTE: Extracting contents from binary data is a resource intensive operation and consumes a lot of resources. It is highly recommended to run pipelines using this processor in a dedicated ingest node. +[[attachment-keep-binary]] +==== Keeping the attachment binary + +Keeping the binary as a field within the document might consume a lot of resources. It is highly recommended to remove +that field from the document by setting `remove_binary` to `true`, as in the other +examples shown on this page.
If you _do_ want to keep the binary field, explicitly set `remove_binary` to `false` to +avoid the warning you get from omitting it: + +[source,console] +---- +PUT _ingest/pipeline/attachment +{ + "description" : "Extract attachment information including original binary", + "processors" : [ + { + "attachment" : { + "field" : "data", + "remove_binary": false + } + } + ] +} +PUT my-index-000001/_doc/my_id?pipeline=attachment +{ + "data": "e1xydGYxXGFuc2kNCkxvcmVtIGlwc3VtIGRvbG9yIHNpdCBhbWV0DQpccGFyIH0=" +} +GET my-index-000001/_doc/my_id +---- + +The document's `_source` object includes the original binary field: + +[source,console-result] +---- +{ + "found": true, + "_index": "my-index-000001", + "_id": "my_id", + "_version": 1, + "_seq_no": 22, + "_primary_term": 1, + "_source": { + "data": "e1xydGYxXGFuc2kNCkxvcmVtIGlwc3VtIGRvbG9yIHNpdCBhbWV0DQpccGFyIH0=", + "attachment": { + "content_type": "application/rtf", + "language": "ro", + "content": "Lorem ipsum dolor sit amet", + "content_length": 28 + } + } +} +---- +// TESTRESPONSE[s/"_seq_no": \d+/"_seq_no" : $body._seq_no/ s/"_primary_term" : 1/"_primary_term" : $body._primary_term/] + [[attachment-cbor]] ==== Use the attachment processor with CBOR @@ -170,7 +219,7 @@ PUT _ingest/pipeline/cbor-attachment { "attachment" : { "field" : "data", - "remove_binary": false + "remove_binary": true } } ] @@ -226,7 +275,7 @@ PUT _ingest/pipeline/attachment "field" : "data", "indexed_chars" : 11, "indexed_chars_field" : "max_size", - "remove_binary": false + "remove_binary": true } } ] @@ -250,7 +299,6 @@ Returns this: "_seq_no": 35, "_primary_term": 1, "_source": { - "data": "e1xydGYxXGFuc2kNCkxvcmVtIGlwc3VtIGRvbG9yIHNpdCBhbWV0DQpccGFyIH0=", "attachment": { "content_type": "application/rtf", "language": "is", @@ -274,7 +322,7 @@ PUT _ingest/pipeline/attachment "field" : "data", "indexed_chars" : 11, "indexed_chars_field" : "max_size", - "remove_binary": false + "remove_binary": true } } ] @@ -299,7 +347,6 @@ Returns this: "_seq_no": 40, "_primary_term": 1, "_source": { - "data": "e1xydGYxXGFuc2kNCkxvcmVtIGlwc3VtIGRvbG9yIHNpdCBhbWV0DQpccGFyIH0=", "max_size": 5, "attachment": { "content_type": "application/rtf", @@ -358,7 +405,7 @@ PUT _ingest/pipeline/attachment "attachment": { "target_field": "_ingest._value.attachment", "field": "_ingest._value.data", - "remove_binary": false + "remove_binary": true } } } @@ -396,7 +443,6 @@ Returns this: "attachments" : [ { "filename" : "ipsum.txt", - "data" : "dGhpcyBpcwpqdXN0IHNvbWUgdGV4dAo=", "attachment" : { "content_type" : "text/plain; charset=ISO-8859-1", "language" : "en", @@ -406,7 +452,6 @@ Returns this: }, { "filename" : "test.txt", - "data" : "VGhpcyBpcyBhIHRlc3QK", "attachment" : { "content_type" : "text/plain; charset=ISO-8859-1", "language" : "en", diff --git a/docs/reference/ingest/processors/inference.asciidoc b/docs/reference/ingest/processors/inference.asciidoc index 9c6f0592a1d91..e079b9d665290 100644 --- a/docs/reference/ingest/processors/inference.asciidoc +++ b/docs/reference/ingest/processors/inference.asciidoc @@ -735,3 +735,70 @@ You can also specify the target field as follows: In this case, {feat-imp} is exposed in the `my_field.foo.feature_importance` field. + + +[discrete] +[[inference-processor-examples]] +==== {infer-cap} processor examples + +The following example uses an <> in an {infer} processor named `query_helper_pipeline` to perform a chat completion task. +The processor generates an {es} query from natural language input using a prompt designed for a completion task type. 
+Refer to <> for the {infer} service you use and check the corresponding examples of setting up an endpoint with the chat completion task type. + + +[source,console] +-------------------------------------------------- +PUT _ingest/pipeline/query_helper_pipeline +{ + "processors": [ + { + "script": { + "source": "ctx.prompt = 'Please generate an elasticsearch search query on index `articles_index` for the following natural language query. Dates are in the field `@timestamp`, document types are in the field `type` (options are `news`, `publication`), categories in the field `category` and can be multiple (options are `medicine`, `pharmaceuticals`, `technology`), and document names are in the field `title` which should use a fuzzy match. Ignore fields which cannot be determined from the natural language query context: ' + ctx.content" <1> + } + }, + { + "inference": { + "model_id": "openai_chat_completions", <2> + "input_output": { + "input_field": "prompt", + "output_field": "query" + } + } + }, + { + "remove": { + "field": "prompt" + } + } + ] +} +-------------------------------------------------- +// TEST[skip: An inference endpoint is required.] +<1> The `prompt` field contains the prompt used for the completion task, created with <>. +`+ ctx.content` appends the natural language input to the prompt. +<2> The ID of the pre-configured {infer} endpoint, which utilizes the <> with the `completion` task type. + +The following API request will simulate running a document through the ingest pipeline created previously: + +[source,console] +-------------------------------------------------- +POST _ingest/pipeline/query_helper_pipeline/_simulate +{ + "docs": [ + { + "_source": { + "content": "artificial intelligence in medicine articles published in the last 12 months" <1> + } + } + ] +} +-------------------------------------------------- +// TEST[skip: An inference processor with an inference endpoint is required.] +<1> The natural language query used to generate an {es} query within the prompt created by the {infer} processor. + + +[discrete] +[[infer-proc-readings]] +==== Further reading + +* https://www.elastic.co/search-labs/blog/openwebcrawler-llms-semantic-text-resume-job-search[Which job is the best for you? Using LLMs and semantic_text to match resumes to jobs] \ No newline at end of file diff --git a/docs/reference/mapping/types/dense-vector.asciidoc b/docs/reference/mapping/types/dense-vector.asciidoc index e6e11d6dd539f..199a59a5b143c 100644 --- a/docs/reference/mapping/types/dense-vector.asciidoc +++ b/docs/reference/mapping/types/dense-vector.asciidoc @@ -232,8 +232,8 @@ it will be set to the length of the first vector added to the field. `index`:: (Optional, Boolean) -If `true`, you can search this field using the <>. Defaults to `true`. +If `true`, you can search this field using the <> +or <>. Defaults to `true`. [[dense-vector-similarity]] `similarity`:: diff --git a/docs/reference/mapping/types/semantic-text.asciidoc b/docs/reference/mapping/types/semantic-text.asciidoc index f76a9352c2fe8..96dc402e10c60 100644 --- a/docs/reference/mapping/types/semantic-text.asciidoc +++ b/docs/reference/mapping/types/semantic-text.asciidoc @@ -12,13 +12,14 @@ Long passages are <> to smaller sections The `semantic_text` field type specifies an inference endpoint identifier that will be used to generate embeddings. You can create the inference endpoint by using the <>. -This field type and the <> type make it simpler to perform semantic search on your data.
-If you don't specify an inference endpoint, the <> is used by default. +This field type and the <> type make it simpler to perform semantic search on your data. + +If you don't specify an inference endpoint, the `inference_id` field defaults to `.elser-2-elasticsearch`, a preconfigured endpoint for the `elasticsearch` service. Using `semantic_text`, you won't need to specify how to generate embeddings for your data, or how to index it. The {infer} endpoint automatically determines the embedding generation, indexing, and query to use. -If you use the ELSER service, you can set up `semantic_text` with the following API request: +If you use the preconfigured `.elser-2-elasticsearch` endpoint, you can set up `semantic_text` with the following API request: [source,console] ------------------------------------------------------------ @@ -34,7 +35,7 @@ PUT my-index-000001 } ------------------------------------------------------------ -If you use a service other than ELSER, you must create an {infer} endpoint using the <> and reference it when setting up `semantic_text` as the following example demonstrates: +To use a custom {infer} endpoint instead of the default `.elser-2-elasticsearch`, you must <> and specify its `inference_id` when setting up the `semantic_text` field type. [source,console] ------------------------------------------------------------ @@ -53,8 +54,7 @@ PUT my-index-000002 // TEST[skip:Requires inference endpoint] <1> The `inference_id` of the {infer} endpoint to use to generate embeddings. - -The recommended way to use semantic_text is by having dedicated {infer} endpoints for ingestion and search. +The recommended way to use `semantic_text` is by having dedicated {infer} endpoints for ingestion and search. This ensures that search speed remains unaffected by ingestion workloads, and vice versa. After creating dedicated {infer} endpoints for both, you can reference them using the `inference_id` and `search_inference_id` parameters when setting up the index mapping for an index that uses the `semantic_text` field. @@ -82,10 +82,11 @@ PUT my-index-000003 `inference_id`:: (Required, string) -{infer-cap} endpoint that will be used to generate the embeddings for the field. +{infer-cap} endpoint that will be used to generate embeddings for the field. +By default, `.elser-2-elasticsearch` is used. This parameter cannot be updated. Use the <> to create the endpoint. -If `search_inference_id` is specified, the {infer} endpoint defined by `inference_id` will only be used at index time. +If `search_inference_id` is specified, the {infer} endpoint will only be used at index time. `search_inference_id`:: (Optional, string) @@ -112,50 +113,43 @@ Trying to <> that is used on a {infer-cap} endpoints have a limit on the amount of text they can process. To allow for large amounts of text to be used in semantic search, `semantic_text` automatically generates smaller passages if needed, called _chunks_. -Each chunk will include the text subpassage and the corresponding embedding generated from it. +Each chunk refers to a passage of the text and the corresponding embedding generated from it. When querying, the individual passages will be automatically searched for each document, and the most relevant passage will be used to compute a score. For more details on chunking and how to configure chunking settings, see <> in the Inference API documentation. +Refer to <> to learn more about +semantic search using `semantic_text` and the `semantic` query.
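For reference, a minimal `semantic` query against the `inference_field` mapped in the earlier examples could look like the following sketch; the query text is an illustrative placeholder:

[source,console]
----
GET my-index-000001/_search
{
  "query": {
    "semantic": {
      "field": "inference_field",
      "query": "Best surfing places"
    }
  }
}
----
// TEST[skip:Requires inference endpoint]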
[discrete] -[[semantic-text-structure]] -==== `semantic_text` structure +[[semantic-text-highlighting]] +==== Extracting relevant fragments from semantic text -Once a document is ingested, a `semantic_text` field will have the following structure: +You can extract the most relevant fragments from a semantic text field by using the <> in the <>. -[source,console-result] +[source,console] ------------------------------------------------------------ -"inference_field": { - "text": "these are not the droids you're looking for", <1> - "inference": { - "inference_id": "my-elser-endpoint", <2> - "model_settings": { <3> - "task_type": "sparse_embedding" +POST test-index/_search +{ + "query": { + "semantic": { + "field": "my_semantic_field", + "query": "Which country is Paris in?" + } }, - "chunks": [ <4> - { - "text": "these are not the droids you're looking for", - "embeddings": { - (...) + "highlight": { + "fields": { + "my_semantic_field": { + "type": "semantic", + "number_of_fragments": 2, <1> + "order": "score" <2> + } } - } - ] - } + } } ------------------------------------------------------------ -// TEST[skip:TBD] -<1> The field will become an object structure to accommodate both the original -text and the inference results. -<2> The `inference_id` used to generate the embeddings. -<3> Model settings, including the task type and dimensions/similarity if -applicable. -<4> Inference results will be grouped in chunks, each with its corresponding -text and embeddings. - -Refer to <> to learn more about -semantic search using `semantic_text` and the `semantic` query. - +// TEST[skip:Requires inference endpoint] +<1> Specifies the maximum number of fragments to return. +<2> Sorts highlighted fragments by score when set to `score`. By default, fragments will be output in the order they appear in the field (order: none). [discrete] [[custom-indexing]] @@ -208,7 +202,7 @@ PUT test-index "properties": { "infer_field": { "type": "semantic_text", - "inference_id": "my-elser-endpoint" + "inference_id": ".elser-2-elasticsearch" }, "source_field": { "type": "text", diff --git a/docs/reference/mapping/types/sparse-vector.asciidoc b/docs/reference/mapping/types/sparse-vector.asciidoc index b24f65fcf97ca..22d4644ede490 100644 --- a/docs/reference/mapping/types/sparse-vector.asciidoc +++ b/docs/reference/mapping/types/sparse-vector.asciidoc @@ -26,6 +26,23 @@ PUT my-index See <> for a complete example on adding documents to a `sparse_vector` mapped field using ELSER. +[[sparse-vectors-params]] +==== Parameters for `sparse_vector` fields + +The following parameters are accepted by `sparse_vector` fields: + +[horizontal] + +<>:: + +Indicates whether the field value should be stored and retrievable independently of the <> field. +Accepted values: true or false (default). +The field's data is stored using term vectors, a disk-efficient structure compared to the original JSON input. +The input map can be retrieved during a search request via the <>. +To benefit from reduced disk usage, you must either: + * Exclude the field from <>. + * Use <>. + [[index-multi-value-sparse-vectors]] ==== Multi-value sparse vectors diff --git a/docs/reference/migration/migrate_9_0.asciidoc b/docs/reference/migration/migrate_9_0.asciidoc index 5048220966bba..8f0b16e31b56e 100644 --- a/docs/reference/migration/migrate_9_0.asciidoc +++ b/docs/reference/migration/migrate_9_0.asciidoc @@ -244,6 +244,25 @@ The deprecated highlighting `force_source` parameter is no longer supported. Users should remove usages of the `force_source` parameter from their search requests.
==== +[discrete] +[[breaking_90_transforms_changes]] +==== {transforms-cap} changes + +[[updating_deprecated_transform_roles]] +.Updating deprecated {transform} roles (`data_frame_transforms_admin` and `data_frame_transforms_user`) +[%collapsible] +==== +*Details* + +The `data_frame_transforms_admin` and `data_frame_transforms_user` {transform} roles have been deprecated. + +*Impact* + +Users must update any existing {transforms} that use deprecated {transform} roles (`data_frame_transforms_admin` or `data_frame_transforms_user`) to use the new equivalent {transform} roles (`transform_admin` or `transform_user`). +To update the {transform} roles: + +1. Switch to a user with the `transform_admin` role (to replace `data_frame_transforms_admin`) or the `transform_user` role (to replace `data_frame_transforms_user`). +2. Call the <> with that user. +==== + [discrete] [[deprecated-9.0]] diff --git a/docs/reference/ml/ml-shared.asciidoc b/docs/reference/ml/ml-shared.asciidoc index d01047eac9815..4948db48664ed 100644 --- a/docs/reference/ml/ml-shared.asciidoc +++ b/docs/reference/ml/ml-shared.asciidoc @@ -18,7 +18,8 @@ end::adaptive-allocation-max-number[] tag::adaptive-allocation-min-number[] Specifies the minimum number of allocations to scale to. -If set, it must be greater than or equal to `1`. +If set, it must be greater than or equal to `0`. +If not defined, the deployment scales to `0`. end::adaptive-allocation-min-number[] tag::aggregations[] diff --git a/docs/reference/redirects.asciidoc b/docs/reference/redirects.asciidoc index 506dff7891ad2..c3bf84fa600d2 100644 --- a/docs/reference/redirects.asciidoc +++ b/docs/reference/redirects.asciidoc @@ -1942,3 +1942,8 @@ Refer to <>. === Delete geoip database configuration API Refer to <>. + +[role="exclude",id="knn-search-api"] +=== Delete _knn_search API + +Refer to <>. diff --git a/docs/reference/search.asciidoc b/docs/reference/search.asciidoc index b39afff876eed..70ffe02e44d95 100644 --- a/docs/reference/search.asciidoc +++ b/docs/reference/search.asciidoc @@ -50,8 +50,6 @@ include::search/async-search.asciidoc[] include::search/point-in-time-api.asciidoc[] -include::search/knn-search.asciidoc[] - include::search/retriever.asciidoc[] include::search/rrf.asciidoc[] diff --git a/docs/reference/search/knn-search.asciidoc b/docs/reference/search/knn-search.asciidoc deleted file mode 100644 index 78e3e13b09fee..0000000000000 --- a/docs/reference/search/knn-search.asciidoc +++ /dev/null @@ -1,146 +0,0 @@ -[[knn-search-api]] -=== kNN search API -++++ -kNN search -++++ - -deprecated::[8.4.0,"The kNN search API has been replaced by the <> in the search API."] - -Performs a k-nearest neighbor (kNN) search and returns the matching documents. - -//// -[source,console] ----- -PUT my-index -{ - "mappings": { - "properties": { - "image_vector": { - "type": "dense_vector", - "dims": 3, - "index": true, - "similarity": "l2_norm" - } - } - } -} - -PUT my-index/_doc/1?refresh -{ - "image_vector" : [0.5, 10, 6] -} ----- -//// - -[source,console] ----- -GET my-index/_knn_search -{ - "knn": { - "field": "image_vector", - "query_vector": [0.3, 0.1, 1.2], - "k": 10, - "num_candidates": 100 - }, - "_source": ["name", "file_type"] -} ----- -// TEST[continued] -// TEST[warning:The kNN search API has been replaced by the `knn` option in the search API.] 
- -[[knn-search-api-request]] -==== {api-request-title} - -`GET /_knn_search` - -`POST /_knn_search` - -[[knn-search-api-prereqs]] -==== {api-prereq-title} - -* If the {es} {security-features} are enabled, you must have the `read` -<> for the target data stream, index, -or alias. - -[[knn-search-api-desc]] -==== {api-description-title} - -The kNN search API performs a k-nearest neighbor (kNN) search on a -<> field. Given a query vector, it finds the _k_ -closest vectors and returns those documents as search hits. - -//tag::hnsw-algorithm[] -{es} uses the https://arxiv.org/abs/1603.09320[HNSW algorithm] to support -efficient kNN search. Like most kNN algorithms, HNSW is an approximate method -that sacrifices result accuracy for improved search speed. This means the -results returned are not always the true _k_ closest neighbors. -//end::hnsw-algorithm[] - -The kNN search API supports restricting the search using a filter. The search -will return the top `k` documents that also match the filter query. - -[[knn-search-api-path-params]] -==== {api-path-parms-title} - -``:: -(Optional, string) Comma-separated list of data streams, indices, and aliases -to search. Supports wildcards (`*`). To search all data streams and indices, -use `*` or `_all`. - -[role="child_attributes"] -[[knn-search-api-query-params]] -==== {api-query-parms-title} - -include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=routing] - -[role="child_attributes"] -[[knn-search-api-request-body]] -==== {api-request-body-title} - -`filter`:: -(Optional, <>) -include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=knn-filter] - -`knn`:: -(Required, object) -include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=knn] -+ -.Properties of `knn` object -[%collapsible%open] -==== -`field`:: -(Required, string) -include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=knn-field] - -`k`:: -(Optional, integer) -include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=knn-k] - -`num_candidates`:: -(Optional, integer) -include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=knn-num-candidates] - -`query_vector`:: -(Required, array of floats or string) -include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=knn-query-vector] -==== - -include::{es-ref-dir}/search/search.asciidoc[tag=docvalue-fields-def] -include::{es-ref-dir}/search/search.asciidoc[tag=fields-param-def] -include::{es-ref-dir}/search/search.asciidoc[tag=source-filtering-def] -include::{es-ref-dir}/search/search.asciidoc[tag=stored-fields-def] - -[role="child_attributes"] -[[knn-search-api-response-body]] -==== {api-response-body-title} - -A kNN search response has the exact same structure as a -<>. However, certain sections -have a meaning specific to kNN search: - -* The <> is determined by -the similarity between the query and document vector. See -<>. -* The `hits.total` object contains the total number of nearest neighbor -candidates considered, which is `num_candidates * num_shards`. The -`hits.total.relation` will always be `eq`, indicating an exact value. diff --git a/docs/reference/search/retriever.asciidoc b/docs/reference/search/retriever.asciidoc index b90b7e312c790..cb04d4fb6fbf1 100644 --- a/docs/reference/search/retriever.asciidoc +++ b/docs/reference/search/retriever.asciidoc @@ -765,11 +765,11 @@ clauses in a <>. [[retriever-restrictions]] ==== Restrictions on search parameters when specifying a retriever -When a retriever is specified as part of a search, the following elements are not allowed at the top-level. 
-Instead they are only allowed as elements of specific retrievers: +When a retriever is specified as part of a search, the following elements are not allowed at the top-level: * <> * <> * <> * <> * <> +* <> diff --git a/docs/reference/search/search-your-data/knn-search.asciidoc b/docs/reference/search/search-your-data/knn-search.asciidoc index 6fb7f1747051f..59a903b95e4f8 100644 --- a/docs/reference/search/search-your-data/knn-search.asciidoc +++ b/docs/reference/search/search-your-data/knn-search.asciidoc @@ -1058,8 +1058,10 @@ PUT image-index * When using kNN search in <>, the <> option is not supported. -* {blank} -include::{es-ref-dir}/search/knn-search.asciidoc[tag=hnsw-algorithm] +* {es} uses the https://arxiv.org/abs/1603.09320[HNSW algorithm] to support +efficient kNN search. Like most kNN algorithms, HNSW is an approximate method +that sacrifices result accuracy for improved search speed. This means the +results returned are not always the true _k_ closest neighbors. NOTE: Approximate kNN search always uses the <> search type in order to gather diff --git a/docs/reference/search/search-your-data/semantic-search-semantic-text.asciidoc b/docs/reference/search/search-your-data/semantic-search-semantic-text.asciidoc index ba9c81db21384..3448940b6fad7 100644 --- a/docs/reference/search/search-your-data/semantic-search-semantic-text.asciidoc +++ b/docs/reference/search/search-your-data/semantic-search-semantic-text.asciidoc @@ -14,15 +14,15 @@ You don't need to define model related settings and parameters, or create {infer The recommended way to use <> in the {stack} is following the `semantic_text` workflow. When you need more control over indexing and query settings, you can still use the complete {infer} workflow (refer to <> to review the process). -This tutorial uses the <> for demonstration, but you can use any service and their supported models offered by the {infer-cap} API. +This tutorial uses the <> for demonstration, but you can use any service and its supported models offered by the {infer-cap} API. [discrete] [[semantic-text-requirements]] ==== Requirements -This tutorial uses the <> for demonstration, which is created automatically as needed. -To use the `semantic_text` field type with an {infer} service other than ELSER, you must create an inference endpoint using the <>. +This tutorial uses the <> for demonstration, which is created automatically as needed. +To use the `semantic_text` field type with an {infer} service other than the `elasticsearch` service, you must create an inference endpoint using the <>. [discrete] @@ -48,7 +48,7 @@ PUT semantic-embeddings // TEST[skip:TBD] <1> The name of the field to contain the generated embeddings. <2> The field to contain the embeddings is a `semantic_text` field. -Since no `inference_id` is provided, the <> is used by default. +Since no `inference_id` is provided, the default endpoint `.elser-2-elasticsearch` for the <> is used. To use a different {infer} service, you must create an {infer} endpoint first using the <> and then specify it in the `semantic_text` field mapping using the `inference_id` parameter.
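As a sketch of that last step, the mapping below references a custom endpoint through its `inference_id`; the index name and endpoint ID are illustrative assumptions:

[source,console]
----
PUT semantic-embeddings-custom
{
  "mappings": {
    "properties": {
      "content": {
        "type": "semantic_text",
        "inference_id": "my-custom-endpoint" <1>
      }
    }
  }
}
----
// TEST[skip:Requires inference endpoint]
<1> A hypothetical endpoint created beforehand with the create {infer} API.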
[NOTE] diff --git a/docs/reference/search/search-your-data/semantic-text-hybrid-search b/docs/reference/search/search-your-data/semantic-text-hybrid-search index c56b283434df5..4b49a7c3155db 100644 --- a/docs/reference/search/search-your-data/semantic-text-hybrid-search +++ b/docs/reference/search/search-your-data/semantic-text-hybrid-search @@ -8,47 +8,12 @@ This tutorial demonstrates how to perform hybrid search, combining semantic sear In hybrid search, semantic search retrieves results based on the meaning of the text, while full-text search focuses on exact word matches. By combining both methods, hybrid search delivers more relevant results, particularly in cases where relying on a single approach may not be sufficient. -The recommended way to use hybrid search in the {stack} is following the `semantic_text` workflow. This tutorial uses the <> for demonstration, but you can use any service and its supported models offered by the {infer-cap} API. - -[discrete] -[[semantic-text-hybrid-infer-endpoint]] -==== Create the {infer} endpoint - -Create an inference endpoint by using the <>: - -[source,console] ------------------------------------------------------------- -PUT _inference/sparse_embedding/my-elser-endpoint <1> -{ - "service": "elser", <2> - "service_settings": { - "adaptive_allocations": { <3> - "enabled": true, - "min_number_of_allocations": 3, - "max_number_of_allocations": 10 - }, - "num_threads": 1 - } -} ------------------------------------------------------------- -// TEST[skip:TBD] -<1> The task type is `sparse_embedding` in the path as the `elser` service will -be used and ELSER creates sparse vectors. The `inference_id` is -`my-elser-endpoint`. -<2> The `elser` service is used in this example. -<3> This setting enables and configures adaptive allocations. -Adaptive allocations make it possible for ELSER to automatically scale up or down resources based on the current load on the process. - -[NOTE] -==== -You might see a 502 bad gateway error in the response when using the {kib} Console. -This error usually just reflects a timeout, while the model downloads in the background. -You can check the download progress in the {ml-app} UI. -==== +The recommended way to use hybrid search in the {stack} is following the `semantic_text` workflow. +This tutorial uses the <> for demonstration, but you can use any service and its supported models offered by the {infer-cap} API. [discrete] [[hybrid-search-create-index-mapping]] -==== Create an index mapping for hybrid search +==== Create an index mapping The destination index will contain both the embeddings for semantic search and the original text field for full-text search. This structure enables the combination of semantic search and full-text search. @@ -60,11 +25,10 @@ PUT semantic-embeddings "properties": { "semantic_text": { <1> - "type": "semantic_text", - "inference_id": "my-elser-endpoint" <2> + "type": "semantic_text" }, - "content": { <3> + "content": { <2> "type": "text", - "copy_to": "semantic_text" <4> + "copy_to": "semantic_text" <3> } } } @@ -72,9 +36,8 @@ PUT semantic-embeddings ------------------------------------------------------------ // TEST[skip:TBD] <1> The name of the field to contain the generated embeddings for semantic search. -<2> The identifier of the inference endpoint that generates the embeddings based on the input text. -<3> The name of the field to contain the original text for lexical search. -<4> The textual data stored in the `content` field will be copied to `semantic_text` and processed by the {infer} endpoint.
+<2> The name of the field to contain the original text for lexical search. +<3> The textual data stored in the `content` field will be copied to `semantic_text` and processed by the {infer} endpoint. [NOTE] ==== diff --git a/docs/reference/setup/install/docker.asciidoc b/docs/reference/setup/install/docker.asciidoc index 58feb55f32e2f..8694d7f5b46c6 100644 --- a/docs/reference/setup/install/docker.asciidoc +++ b/docs/reference/setup/install/docker.asciidoc @@ -39,7 +39,7 @@ adjust memory usage in Docker Desktop by going to **Settings > Resources**. ---- docker network create elastic ---- - +// REVIEWED[DEC.10.24] . Pull the {es} Docker image. + -- @@ -52,10 +52,11 @@ endif::[] ---- docker pull {docker-image} ---- +// REVIEWED[DEC.10.24] -- . Optional: Install -https://docs.sigstore.dev/system_config/installation/[Cosign] for your +https://docs.sigstore.dev/cosign/system_config/installation/[Cosign] for your environment. Then use Cosign to verify the {es} image's signature. + [[docker-verify-signature]] @@ -64,6 +65,7 @@ environment. Then use Cosign to verify the {es} image's signature. wget https://artifacts.elastic.co/cosign.pub cosign verify --key cosign.pub {docker-image} ---- +// REVIEWED[DEC.10.24] + The `cosign` command prints the check results and the signature payload in JSON format: + @@ -75,6 +77,7 @@ The following checks were performed on each of these signatures: - Existence of the claims in the transparency log was verified offline - The signatures were verified against the specified public key ---- +// REVIEWED[DEC.10.24] . Start an {es} container. + @@ -82,6 +85,7 @@ The following checks were performed on each of these signatures: ---- docker run --name es01 --net elastic -p 9200:9200 -it -m 1GB {docker-image} ---- +// REVIEWED[DEC.10.24] + TIP: Use the `-m` flag to set a memory limit for the container. This removes the need to <>. @@ -95,6 +99,7 @@ If you intend to use the {ml} capabilities, then start the container with this c ---- docker run --name es01 --net elastic -p 9200:9200 -it -m 6GB -e "xpack.ml.use_auto_machine_memory_percent=true" {docker-image} ---- +// REVIEWED[DEC.10.24] The command prints the `elastic` user password and an enrollment token for {kib}. . Copy the generated `elastic` password and enrollment token. These credentials @@ -106,6 +111,7 @@ credentials using the following commands. docker exec -it es01 /usr/share/elasticsearch/bin/elasticsearch-reset-password -u elastic docker exec -it es01 /usr/share/elasticsearch/bin/elasticsearch-create-enrollment-token -s kibana ---- +// REVIEWED[DEC.10.24] + We recommend storing the `elastic` password as an environment variable in your shell. Example: + @@ -113,6 +119,7 @@ We recommend storing the `elastic` password as an environment variable in your s ---- export ELASTIC_PASSWORD="your_password" ---- +// REVIEWED[DEC.10.24] . Copy the `http_ca.crt` SSL certificate from the container to your local machine. + @@ -120,6 +127,7 @@ export ELASTIC_PASSWORD="your_password" ---- docker cp es01:/usr/share/elasticsearch/config/certs/http_ca.crt . ---- +// REVIEWED[DEC.10.24] . Make a REST API call to {es} to ensure the {es} container is running. + @@ -128,6 +136,7 @@ docker cp es01:/usr/share/elasticsearch/config/certs/http_ca.crt . 
curl --cacert http_ca.crt -u elastic:$ELASTIC_PASSWORD https://localhost:9200 ---- // NOTCONSOLE +// REVIEWED[DEC.10.24] ===== Add more nodes @@ -137,6 +146,7 @@ curl --cacert http_ca.crt -u elastic:$ELASTIC_PASSWORD https://localhost:9200 ---- docker exec -it es01 /usr/share/elasticsearch/bin/elasticsearch-create-enrollment-token -s node ---- +// REVIEWED[DEC.10.24] + The enrollment token is valid for 30 minutes. @@ -146,6 +156,7 @@ The enrollment token is valid for 30 minutes. ---- docker run -e ENROLLMENT_TOKEN="" --name es02 --net elastic -it -m 1GB {docker-image} ---- +// REVIEWED[DEC.10.24] . Call the <> to verify the node was added to the cluster. + @@ -154,6 +165,7 @@ docker run -e ENROLLMENT_TOKEN="" --name es02 --net elastic -it -m 1GB {d curl --cacert http_ca.crt -u elastic:$ELASTIC_PASSWORD https://localhost:9200/_cat/nodes ---- // NOTCONSOLE +// REVIEWED[DEC.10.24] [[run-kibana-docker]] ===== Run {kib} @@ -170,6 +182,7 @@ endif::[] ---- docker pull {kib-docker-image} ---- +// REVIEWED[DEC.10.24] -- . Optional: Verify the {kib} image's signature. @@ -179,6 +192,7 @@ docker pull {kib-docker-image} wget https://artifacts.elastic.co/cosign.pub cosign verify --key cosign.pub {kib-docker-image} ---- +// REVIEWED[DEC.10.24] . Start a {kib} container. + @@ -186,6 +200,7 @@ cosign verify --key cosign.pub {kib-docker-image} ---- docker run --name kib01 --net elastic -p 5601:5601 {kib-docker-image} ---- +// REVIEWED[DEC.10.24] . When {kib} starts, it outputs a unique generated link to the terminal. To access {kib}, open this link in a web browser. @@ -198,6 +213,7 @@ To regenerate the token, run: ---- docker exec -it es01 /usr/share/elasticsearch/bin/elasticsearch-create-enrollment-token -s kibana ---- +// REVIEWED[DEC.10.24] . Log in to {kib} as the `elastic` user with the password that was generated when you started {es}. @@ -208,6 +224,7 @@ To regenerate the password, run: ---- docker exec -it es01 /usr/share/elasticsearch/bin/elasticsearch-reset-password -u elastic ---- +// REVIEWED[DEC.10.24] [[remove-containers-docker]] ===== Remove containers @@ -226,6 +243,7 @@ docker rm es02 # Remove the {kib} container docker rm kib01 ---- +// REVIEWED[DEC.10.24] ===== Next steps @@ -306,6 +324,7 @@ ES_PORT=127.0.0.1:9200 ---- docker-compose up -d ---- +// REVIEWED[DEC.10.24] . After the cluster has started, open http://localhost:5601 in a web browser to access {kib}. @@ -321,6 +340,7 @@ is preserved and loaded when you restart the cluster with `docker-compose up`. ---- docker-compose down ---- +// REVIEWED[DEC.10.24] To delete the network, containers, and volumes when you stop the cluster, specify the `-v` option: @@ -329,6 +349,7 @@ specify the `-v` option: ---- docker-compose down -v ---- +// REVIEWED[DEC.10.24] ===== Next steps @@ -377,6 +398,7 @@ The `vm.max_map_count` setting must be set within the xhyve virtual machine: -------------------------------------------- screen ~/Library/Containers/com.docker.docker/Data/vms/0/tty -------------------------------------------- +// REVIEWED[DEC.10.24] . Press enter and use `sysctl` to configure `vm.max_map_count`: + @@ -494,6 +516,7 @@ To check the Docker daemon defaults for ulimits, run: -------------------------------------------- docker run --rm {docker-image} /bin/bash -c 'ulimit -Hn && ulimit -Sn && ulimit -Hu && ulimit -Su' -------------------------------------------- +// REVIEWED[DEC.10.24] If needed, adjust them in the Daemon or override them per container. 
For example, when using `docker run`, set: @@ -502,6 +525,7 @@ For example, when using `docker run`, set: -------------------------------------------- --ulimit nofile=65535:65535 -------------------------------------------- +// REVIEWED[DEC.10.24] ===== Disable swapping @@ -518,6 +542,7 @@ When using `docker run`, you can specify: ---- -e "bootstrap.memory_lock=true" --ulimit memlock=-1:-1 ---- +// REVIEWED[DEC.10.24] ===== Randomize published ports @@ -545,6 +570,7 @@ environment variable. For example, to use 1GB, use the following command. ---- docker run -e ES_JAVA_OPTS="-Xms1g -Xmx1g" -e ENROLLMENT_TOKEN="" --name es01 -p 9200:9200 --net elastic -it {docker-image} ---- +// REVIEWED[DEC.10.24] The `ES_JAVA_OPTS` variable overrides all other JVM options. We do not recommend using `ES_JAVA_OPTS` in production. @@ -616,6 +642,7 @@ If you mount the password file to `/run/secrets/bootstrapPassword.txt`, specify: -------------------------------------------- -e ELASTIC_PASSWORD_FILE=/run/secrets/bootstrapPassword.txt -------------------------------------------- +// REVIEWED[DEC.10.24] You can override the default command for the image to pass {es} configuration parameters as command line options. For example: diff --git a/docs/reference/tab-widgets/inference-api/infer-api-task.asciidoc b/docs/reference/tab-widgets/inference-api/infer-api-task.asciidoc index 02995591d9c8a..227eb774a4a9f 100644 --- a/docs/reference/tab-widgets/inference-api/infer-api-task.asciidoc +++ b/docs/reference/tab-widgets/inference-api/infer-api-task.asciidoc @@ -36,7 +36,7 @@ the `cosine` measures are equivalent. ------------------------------------------------------------ PUT _inference/sparse_embedding/elser_embeddings <1> { - "service": "elser", + "service": "elasticsearch", "service_settings": { "num_allocations": 1, "num_threads": 1 @@ -206,7 +206,7 @@ PUT _inference/text_embedding/google_vertex_ai_embeddings <1> <2> A valid service account in JSON format for the Google Vertex AI API. <3> For the list of the available models, refer to the https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/text-embeddings-api[Text embeddings API] page. <4> The name of the location to use for the {infer} task. Refer to https://cloud.google.com/vertex-ai/generative-ai/docs/learn/locations[Generative AI on Vertex AI locations] for available locations. -<5> The name of the project to use for the {infer} task. +<5> The name of the project to use for the {infer} task. // end::google-vertex-ai[] diff --git a/docs/reference/troubleshooting/troubleshooting-unstable-cluster.asciidoc b/docs/reference/troubleshooting/troubleshooting-unstable-cluster.asciidoc index cbb35f7731034..e47b85aa99547 100644 --- a/docs/reference/troubleshooting/troubleshooting-unstable-cluster.asciidoc +++ b/docs/reference/troubleshooting/troubleshooting-unstable-cluster.asciidoc @@ -126,7 +126,7 @@ repeatedly-dropped connections will severely affect its operation. The connections from the elected master node to every other node in the cluster are particularly important. The elected master never spontaneously closes its outbound connections to other nodes. Similarly, once an inbound connection is -fully established, a node never spontaneously it unless the node is shutting +fully established, a node never spontaneously closes it unless the node is shutting down. 
If you see a node unexpectedly leave the cluster with the `disconnected` diff --git a/gradle/verification-metadata.xml b/gradle/verification-metadata.xml index 37178fd9439d0..33addef8aedd0 100644 --- a/gradle/verification-metadata.xml +++ b/gradle/verification-metadata.xml @@ -184,6 +184,11 @@ + + + + + @@ -4383,11 +4388,6 @@ - - - - - @@ -4408,9 +4408,9 @@ - - - + + + @@ -4433,11 +4433,6 @@ - - - - - @@ -4478,11 +4473,6 @@ - - - - - @@ -4493,9 +4483,9 @@ - - - + + + diff --git a/libs/core/src/main/java/org/elasticsearch/jdk/RuntimeVersionFeature.java b/libs/core/src/main/java/org/elasticsearch/jdk/RuntimeVersionFeature.java new file mode 100644 index 0000000000000..fe6e73271599f --- /dev/null +++ b/libs/core/src/main/java/org/elasticsearch/jdk/RuntimeVersionFeature.java @@ -0,0 +1,21 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.jdk; + +import org.elasticsearch.core.UpdateForV9; + +public class RuntimeVersionFeature { + private RuntimeVersionFeature() {} + + @UpdateForV9(owner = UpdateForV9.Owner.CORE_INFRA) // Remove once we removed all references to SecurityManager in code + public static boolean isSecurityManagerAvailable() { + return Runtime.version().feature() < 24; + } +} diff --git a/libs/entitlement/asm-provider/build.gradle b/libs/entitlement/asm-provider/build.gradle index 5f968629fe557..dcec0579a5bae 100644 --- a/libs/entitlement/asm-provider/build.gradle +++ b/libs/entitlement/asm-provider/build.gradle @@ -11,10 +11,10 @@ apply plugin: 'elasticsearch.build' dependencies { compileOnly project(':libs:entitlement') - implementation 'org.ow2.asm:asm:9.7' + implementation 'org.ow2.asm:asm:9.7.1' testImplementation project(":test:framework") testImplementation project(":libs:entitlement:bridge") - testImplementation 'org.ow2.asm:asm-util:9.7' + testImplementation 'org.ow2.asm:asm-util:9.7.1' } tasks.named('test').configure { diff --git a/libs/entitlement/entitlements-loading.svg b/libs/entitlement/entitlements-loading.svg new file mode 100644 index 0000000000000..4f0213b853bee --- /dev/null +++ b/libs/entitlement/entitlements-loading.svg @@ -0,0 +1,4 @@ + + + +
[SVG diagram: the entitlements loading sequence. ES main uses the Boot Loader to load the Agent Jar (agent main runs in the unnamed module), while the Platform and System Loaders hold the Server and the (instrumented) JDK classes. Entitlements bootstrap grants access to the unnamed module, sets the static init arguments, and loads the agent; the agent then reflectively calls entitlements init, passing Instrumentation. Entitlements init loads the plugin and server policies, creates the entitlements manager (policies plus a method to look up a plugin by Module), stores the manager in a static field accessible by the Bridge (patched into java.base), instruments the JDK classes, and runs a self test to force the bridge to capture the entitlements manager. After this, entitlements are ready.]
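As a minimal, hypothetical sketch of the reflective hand-off the diagram describes (the `EntitlementAgent` class name and the `initialize` method name are illustrative assumptions; only the `EntitlementInitialization` class itself appears in this patch):

import java.lang.instrument.Instrumentation;

public class EntitlementAgent {
    // Dynamic-attach entry point; the agent code runs in the unnamed module.
    public static void agentmain(String agentArgs, Instrumentation inst) throws Exception {
        // Bootstrap has already granted reflective access to the unnamed module,
        // so the agent can reach the init class that is exported to java.base.
        Class<?> initClass = Class.forName("org.elasticsearch.entitlement.initialization.EntitlementInitialization");
        // Pass the Instrumentation instance through so init can instrument JDK classes.
        initClass.getMethod("initialize", Instrumentation.class).invoke(null, inst);
    }
}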
\ No newline at end of file diff --git a/libs/entitlement/src/main/java/module-info.java b/libs/entitlement/src/main/java/module-info.java index 54075ba60bbef..b8a125b98e641 100644 --- a/libs/entitlement/src/main/java/module-info.java +++ b/libs/entitlement/src/main/java/module-info.java @@ -17,6 +17,7 @@ requires static org.elasticsearch.entitlement.bridge; // At runtime, this will be in java.base exports org.elasticsearch.entitlement.runtime.api; + exports org.elasticsearch.entitlement.runtime.policy; exports org.elasticsearch.entitlement.instrumentation; exports org.elasticsearch.entitlement.bootstrap to org.elasticsearch.server; exports org.elasticsearch.entitlement.initialization to java.base; diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/initialization/EntitlementInitialization.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/initialization/EntitlementInitialization.java index 0ffab5f93969f..fb694308466c6 100644 --- a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/initialization/EntitlementInitialization.java +++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/initialization/EntitlementInitialization.java @@ -18,6 +18,8 @@ import org.elasticsearch.entitlement.instrumentation.MethodKey; import org.elasticsearch.entitlement.instrumentation.Transformer; import org.elasticsearch.entitlement.runtime.api.ElasticsearchEntitlementChecker; +import org.elasticsearch.entitlement.runtime.policy.CreateClassLoaderEntitlement; +import org.elasticsearch.entitlement.runtime.policy.ExitVMEntitlement; import org.elasticsearch.entitlement.runtime.policy.Policy; import org.elasticsearch.entitlement.runtime.policy.PolicyManager; import org.elasticsearch.entitlement.runtime.policy.PolicyParser; @@ -86,9 +88,11 @@ private static Class internalNameToClass(String internalName) { private static PolicyManager createPolicyManager() throws IOException { Map pluginPolicies = createPluginPolicies(EntitlementBootstrap.bootstrapArgs().pluginData()); - // TODO: What should the name be? 
// TODO(ES-10031): Decide what goes in the elasticsearch default policy and extend it - var serverPolicy = new Policy("server", List.of()); + var serverPolicy = new Policy( + "server", + List.of(new Scope("org.elasticsearch.server", List.of(new ExitVMEntitlement(), new CreateClassLoaderEntitlement()))) + ); return new PolicyManager(serverPolicy, pluginPolicies, EntitlementBootstrap.bootstrapArgs().pluginResolver()); } diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/api/ElasticsearchEntitlementChecker.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/api/ElasticsearchEntitlementChecker.java index 28a080470c043..aa63b630ed7cd 100644 --- a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/api/ElasticsearchEntitlementChecker.java +++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/api/ElasticsearchEntitlementChecker.java @@ -10,7 +10,6 @@ package org.elasticsearch.entitlement.runtime.api; import org.elasticsearch.entitlement.bridge.EntitlementChecker; -import org.elasticsearch.entitlement.runtime.policy.FlagEntitlementType; import org.elasticsearch.entitlement.runtime.policy.PolicyManager; import java.net.URL; @@ -30,27 +29,27 @@ public ElasticsearchEntitlementChecker(PolicyManager policyManager) { @Override public void check$java_lang_System$exit(Class callerClass, int status) { - policyManager.checkFlagEntitlement(callerClass, FlagEntitlementType.SYSTEM_EXIT); + policyManager.checkExitVM(callerClass); } @Override public void check$java_net_URLClassLoader$(Class callerClass, URL[] urls) { - policyManager.checkFlagEntitlement(callerClass, FlagEntitlementType.CREATE_CLASSLOADER); + policyManager.checkCreateClassLoader(callerClass); } @Override public void check$java_net_URLClassLoader$(Class callerClass, URL[] urls, ClassLoader parent) { - policyManager.checkFlagEntitlement(callerClass, FlagEntitlementType.CREATE_CLASSLOADER); + policyManager.checkCreateClassLoader(callerClass); } @Override public void check$java_net_URLClassLoader$(Class callerClass, URL[] urls, ClassLoader parent, URLStreamHandlerFactory factory) { - policyManager.checkFlagEntitlement(callerClass, FlagEntitlementType.CREATE_CLASSLOADER); + policyManager.checkCreateClassLoader(callerClass); } @Override public void check$java_net_URLClassLoader$(Class callerClass, String name, URL[] urls, ClassLoader parent) { - policyManager.checkFlagEntitlement(callerClass, FlagEntitlementType.CREATE_CLASSLOADER); + policyManager.checkCreateClassLoader(callerClass); } @Override @@ -61,6 +60,6 @@ public ElasticsearchEntitlementChecker(PolicyManager policyManager) { ClassLoader parent, URLStreamHandlerFactory factory ) { - policyManager.checkFlagEntitlement(callerClass, FlagEntitlementType.CREATE_CLASSLOADER); + policyManager.checkCreateClassLoader(callerClass); } } diff --git a/test/framework/src/main/java/org/elasticsearch/KnownTransportVersions.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/CreateClassLoaderEntitlement.java similarity index 58% rename from test/framework/src/main/java/org/elasticsearch/KnownTransportVersions.java rename to libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/CreateClassLoaderEntitlement.java index a9981d6951400..138515be9ffcb 100644 --- a/test/framework/src/main/java/org/elasticsearch/KnownTransportVersions.java +++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/CreateClassLoaderEntitlement.java @@ -7,16 +7,9 @@ * 
License v3.0 only", or the "Server Side Public License, v 1". */ -package org.elasticsearch; +package org.elasticsearch.entitlement.runtime.policy; -import java.util.List; - -/** - * Provides access to all known transport versions. - */ -public class KnownTransportVersions { - /** - * A sorted list of all known transport versions - */ - public static final List ALL_VERSIONS = List.copyOf(TransportVersions.getAllVersions()); +public class CreateClassLoaderEntitlement implements Entitlement { + @ExternalEntitlement + public CreateClassLoaderEntitlement() {} } diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/FlagEntitlementType.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/ExitVMEntitlement.java similarity index 79% rename from libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/FlagEntitlementType.java rename to libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/ExitVMEntitlement.java index d40235ee12166..c4a8fc6833581 100644 --- a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/FlagEntitlementType.java +++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/ExitVMEntitlement.java @@ -9,7 +9,7 @@ package org.elasticsearch.entitlement.runtime.policy; -public enum FlagEntitlementType { - SYSTEM_EXIT, - CREATE_CLASSLOADER; -} +/** + * Internal policy type (not-parseable -- not available to plugins). + */ +public class ExitVMEntitlement implements Entitlement {} diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/FileEntitlement.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/FileEntitlement.java index 8df199591d3e4..d0837bc096183 100644 --- a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/FileEntitlement.java +++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/FileEntitlement.java @@ -20,6 +20,9 @@ public class FileEntitlement implements Entitlement { public static final int READ_ACTION = 0x1; public static final int WRITE_ACTION = 0x2; + public static final String READ = "read"; + public static final String WRITE = "write"; + private final String path; private final int actions; @@ -29,12 +32,12 @@ public FileEntitlement(String path, List actionsList) { int actionsInt = 0; for (String actionString : actionsList) { - if ("read".equals(actionString)) { + if (READ.equals(actionString)) { if ((actionsInt & READ_ACTION) == READ_ACTION) { throw new IllegalArgumentException("file action [read] specified multiple times"); } actionsInt |= READ_ACTION; - } else if ("write".equals(actionString)) { + } else if (WRITE.equals(actionString)) { if ((actionsInt & WRITE_ACTION) == WRITE_ACTION) { throw new IllegalArgumentException("file action [write] specified multiple times"); } diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyManager.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyManager.java index b3fb5b75a1d5a..a77c86d5ffd04 100644 --- a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyManager.java +++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyManager.java @@ -17,17 +17,45 @@ import java.lang.module.ModuleFinder; import java.lang.module.ModuleReference; +import java.util.ArrayList; import java.util.Collections; +import java.util.HashMap; 
+import java.util.IdentityHashMap; +import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Optional; import java.util.Set; import java.util.function.Function; import java.util.stream.Collectors; +import java.util.stream.Stream; public class PolicyManager { private static final Logger logger = LogManager.getLogger(ElasticsearchEntitlementChecker.class); + static class ModuleEntitlements { + public static final ModuleEntitlements NONE = new ModuleEntitlements(List.of()); + private final IdentityHashMap<Class<? extends Entitlement>, List<Entitlement>> entitlementsByType; + + ModuleEntitlements(List<Entitlement> entitlements) { + this.entitlementsByType = entitlements.stream() + .collect(Collectors.toMap(Entitlement::getClass, e -> new ArrayList<>(List.of(e)), (a, b) -> { + a.addAll(b); + return a; + }, IdentityHashMap::new)); + } + + public boolean hasEntitlement(Class<? extends Entitlement> entitlementClass) { + return entitlementsByType.containsKey(entitlementClass); + } + + public <E extends Entitlement> Stream<E> getEntitlements(Class<E> entitlementClass) { + return entitlementsByType.get(entitlementClass).stream().map(entitlementClass::cast); + } + } + + final Map<Module, ModuleEntitlements> moduleEntitlementsMap = new HashMap<>(); + protected final Policy serverPolicy; protected final Map<String, Policy> pluginPolicies; private final Function<Class<?>, String> pluginResolver; @@ -56,27 +84,110 @@ public PolicyManager(Policy defaultPolicy, Map<String, Policy> pluginPolicies, F this.pluginResolver = pluginResolver; } - public void checkFlagEntitlement(Class<?> callerClass, FlagEntitlementType type) { + private static List<Entitlement> lookupEntitlementsForModule(Policy policy, String moduleName) { + for (int i = 0; i < policy.scopes.size(); ++i) { + var scope = policy.scopes.get(i); + if (scope.name.equals(moduleName)) { + return scope.entitlements; + } + } + return null; + } + + public void checkExitVM(Class<?> callerClass) { + checkEntitlementPresent(callerClass, ExitVMEntitlement.class); + } + + public void checkCreateClassLoader(Class<?> callerClass) { + checkEntitlementPresent(callerClass, CreateClassLoaderEntitlement.class); + } + + private void checkEntitlementPresent(Class<?> callerClass, Class<? extends Entitlement> entitlementClass) { var requestingModule = requestingModule(callerClass); if (isTriviallyAllowed(requestingModule)) { return; } - // TODO: real policy check. For now, we only allow our hardcoded System.exit policy for server.
- // TODO: this will be checked using policies - if (requestingModule.isNamed() - && requestingModule.getName().equals("org.elasticsearch.server") - && (type == FlagEntitlementType.SYSTEM_EXIT || type == FlagEntitlementType.CREATE_CLASSLOADER)) { - logger.debug("Allowed: caller [{}] in module [{}] has entitlement [{}]", callerClass, requestingModule.getName(), type); + ModuleEntitlements entitlements = getEntitlementsOrThrow(callerClass, requestingModule); + if (entitlements.hasEntitlement(entitlementClass)) { + logger.debug( + () -> Strings.format( + "Entitled: caller [%s], module [%s], type [%s]", + callerClass, + requestingModule.getName(), + entitlementClass.getSimpleName() + ) + ); return; } - - // TODO: plugins policy check using pluginResolver and pluginPolicies throw new NotEntitledException( - Strings.format("Missing entitlement [%s] for caller [%s] in module [%s]", type, callerClass, requestingModule.getName()) + Strings.format( + "Missing entitlement: caller [%s], module [%s], type [%s]", + callerClass, + requestingModule.getName(), + entitlementClass.getSimpleName() + ) ); } + ModuleEntitlements getEntitlementsOrThrow(Class callerClass, Module requestingModule) { + ModuleEntitlements cachedEntitlement = moduleEntitlementsMap.get(requestingModule); + if (cachedEntitlement != null) { + if (cachedEntitlement == ModuleEntitlements.NONE) { + throw new NotEntitledException(buildModuleNoPolicyMessage(callerClass, requestingModule) + "[CACHED]"); + } + return cachedEntitlement; + } + + if (isServerModule(requestingModule)) { + var scopeName = requestingModule.getName(); + return getModuleEntitlementsOrThrow(callerClass, requestingModule, serverPolicy, scopeName); + } + + // plugins + var pluginName = pluginResolver.apply(callerClass); + if (pluginName != null) { + var pluginPolicy = pluginPolicies.get(pluginName); + if (pluginPolicy != null) { + final String scopeName; + if (requestingModule.isNamed() == false) { + scopeName = ALL_UNNAMED; + } else { + scopeName = requestingModule.getName(); + } + return getModuleEntitlementsOrThrow(callerClass, requestingModule, pluginPolicy, scopeName); + } + } + + moduleEntitlementsMap.put(requestingModule, ModuleEntitlements.NONE); + throw new NotEntitledException(buildModuleNoPolicyMessage(callerClass, requestingModule)); + } + + private static String buildModuleNoPolicyMessage(Class callerClass, Module requestingModule) { + return Strings.format("Missing entitlement policy: caller [%s], module [%s]", callerClass, requestingModule.getName()); + } + + private ModuleEntitlements getModuleEntitlementsOrThrow(Class callerClass, Module module, Policy policy, String moduleName) { + var entitlements = lookupEntitlementsForModule(policy, moduleName); + if (entitlements == null) { + // Module without entitlements - remember we don't have any + moduleEntitlementsMap.put(module, ModuleEntitlements.NONE); + throw new NotEntitledException(buildModuleNoPolicyMessage(callerClass, module)); + } + // We have a policy for this module + var classEntitlements = createClassEntitlements(entitlements); + moduleEntitlementsMap.put(module, classEntitlements); + return classEntitlements; + } + + private static boolean isServerModule(Module requestingModule) { + return requestingModule.isNamed() && requestingModule.getLayer() == ModuleLayer.boot(); + } + + private ModuleEntitlements createClassEntitlements(List entitlements) { + return new ModuleEntitlements(entitlements); + } + private static Module requestingModule(Class callerClass) { if (callerClass != null) { Module 
callerModule = callerClass.getModule(); @@ -102,10 +213,10 @@ private static Module requestingModule(Class<?> callerClass) { private static boolean isTriviallyAllowed(Module requestingModule) { if (requestingModule == null) { - logger.debug("Trivially allowed: entire call stack is in composed of classes in system modules"); + logger.debug("Entitlement trivially allowed: entire call stack is composed of classes in system modules"); return true; } - logger.trace("Not trivially allowed"); + logger.trace("Entitlement not trivially allowed"); return false; } diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyParser.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyParser.java index ea6603af99925..0d1a7c14ece4b 100644 --- a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyParser.java +++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyParser.java @@ -19,22 +19,43 @@ import java.lang.reflect.Constructor; import java.lang.reflect.InvocationTargetException; import java.util.ArrayList; +import java.util.Arrays; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.Objects; - -import static org.elasticsearch.entitlement.runtime.policy.PolicyParserException.newPolicyParserException; +import java.util.function.Function; +import java.util.function.Predicate; +import java.util.stream.Collectors; +import java.util.stream.Stream; /** * A parser to parse policy files for entitlements. */ public class PolicyParser { - protected static final String entitlementPackageName = Entitlement.class.getPackage().getName(); + private static final Map<String, Class<? extends Entitlement>> EXTERNAL_ENTITLEMENTS = Stream.of(FileEntitlement.class, CreateClassLoaderEntitlement.class) + .collect(Collectors.toUnmodifiableMap(PolicyParser::getEntitlementTypeName, Function.identity())); protected final XContentParser policyParser; protected final String policyName; + static String getEntitlementTypeName(Class<? extends Entitlement> entitlementClass) { + var entitlementClassName = entitlementClass.getSimpleName(); + + if (entitlementClassName.endsWith("Entitlement") == false) { + throw new IllegalArgumentException( + entitlementClassName + " is not a valid Entitlement class name.
A valid class name must end with 'Entitlement'" + ); + } + + var strippedClassName = entitlementClassName.substring(0, entitlementClassName.indexOf("Entitlement")); + return Arrays.stream(strippedClassName.split("(?=\\p{Lu})")) + .filter(Predicate.not(String::isEmpty)) + .map(s -> s.toLowerCase(Locale.ROOT)) + .collect(Collectors.joining("_")); + } + public PolicyParser(InputStream inputStream, String policyName) throws IOException { this.policyParser = YamlXContent.yamlXContent.createParser(XContentParserConfiguration.EMPTY, Objects.requireNonNull(inputStream)); this.policyName = policyName; @@ -67,18 +88,23 @@ protected Scope parseScope(String scopeName) throws IOException { } List entitlements = new ArrayList<>(); while (policyParser.nextToken() != XContentParser.Token.END_ARRAY) { - if (policyParser.currentToken() != XContentParser.Token.START_OBJECT) { - throw newPolicyParserException(scopeName, "expected object "); - } - if (policyParser.nextToken() != XContentParser.Token.FIELD_NAME) { + if (policyParser.currentToken() == XContentParser.Token.VALUE_STRING) { + String entitlementType = policyParser.text(); + Entitlement entitlement = parseEntitlement(scopeName, entitlementType); + entitlements.add(entitlement); + } else if (policyParser.currentToken() == XContentParser.Token.START_OBJECT) { + if (policyParser.nextToken() != XContentParser.Token.FIELD_NAME) { + throw newPolicyParserException(scopeName, "expected object "); + } + String entitlementType = policyParser.currentName(); + Entitlement entitlement = parseEntitlement(scopeName, entitlementType); + entitlements.add(entitlement); + if (policyParser.nextToken() != XContentParser.Token.END_OBJECT) { + throw newPolicyParserException(scopeName, "expected closing object"); + } + } else { throw newPolicyParserException(scopeName, "expected object "); } - String entitlementType = policyParser.currentName(); - Entitlement entitlement = parseEntitlement(scopeName, entitlementType); - entitlements.add(entitlement); - if (policyParser.nextToken() != XContentParser.Token.END_OBJECT) { - throw newPolicyParserException(scopeName, "expected closing object"); - } } return new Scope(scopeName, entitlements); } catch (IOException ioe) { @@ -87,34 +113,29 @@ protected Scope parseScope(String scopeName) throws IOException { } protected Entitlement parseEntitlement(String scopeName, String entitlementType) throws IOException { - Class entitlementClass; - try { - entitlementClass = Class.forName( - entitlementPackageName - + "." 
- + Character.toUpperCase(entitlementType.charAt(0)) - + entitlementType.substring(1) - + "Entitlement" - ); - } catch (ClassNotFoundException cnfe) { - throw newPolicyParserException(scopeName, "unknown entitlement type [" + entitlementType + "]"); - } - if (Entitlement.class.isAssignableFrom(entitlementClass) == false) { + Class entitlementClass = EXTERNAL_ENTITLEMENTS.get(entitlementType); + + if (entitlementClass == null) { throw newPolicyParserException(scopeName, "unknown entitlement type [" + entitlementType + "]"); } + Constructor entitlementConstructor = entitlementClass.getConstructors()[0]; ExternalEntitlement entitlementMetadata = entitlementConstructor.getAnnotation(ExternalEntitlement.class); if (entitlementMetadata == null) { throw newPolicyParserException(scopeName, "unknown entitlement type [" + entitlementType + "]"); } - if (policyParser.nextToken() != XContentParser.Token.START_OBJECT) { - throw newPolicyParserException(scopeName, entitlementType, "expected entitlement parameters"); + Class[] parameterTypes = entitlementConstructor.getParameterTypes(); + String[] parametersNames = entitlementMetadata.parameterNames(); + + if (parameterTypes.length != 0 || parametersNames.length != 0) { + if (policyParser.nextToken() != XContentParser.Token.START_OBJECT) { + throw newPolicyParserException(scopeName, entitlementType, "expected entitlement parameters"); + } } + Map parsedValues = policyParser.map(); - Class[] parameterTypes = entitlementConstructor.getParameterTypes(); - String[] parametersNames = entitlementMetadata.parameterNames(); Object[] parameterValues = new Object[parameterTypes.length]; for (int parameterIndex = 0; parameterIndex < parameterTypes.length; ++parameterIndex) { String parameterName = parametersNames[parameterIndex]; diff --git a/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyManagerTests.java b/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyManagerTests.java new file mode 100644 index 0000000000000..45bdf2e457824 --- /dev/null +++ b/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyManagerTests.java @@ -0,0 +1,247 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */ + +package org.elasticsearch.entitlement.runtime.policy; + +import org.elasticsearch.entitlement.runtime.api.NotEntitledException; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.compiler.InMemoryJavaCompiler; +import org.elasticsearch.test.jar.JarUtils; + +import java.io.IOException; +import java.lang.module.Configuration; +import java.lang.module.ModuleFinder; +import java.nio.file.Path; +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import static java.util.Map.entry; +import static org.elasticsearch.entitlement.runtime.policy.PolicyManager.ALL_UNNAMED; +import static org.elasticsearch.test.LambdaMatchers.transformedMatch; +import static org.hamcrest.Matchers.aMapWithSize; +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.endsWith; +import static org.hamcrest.Matchers.hasEntry; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.sameInstance; + +@ESTestCase.WithoutSecurityManager +public class PolicyManagerTests extends ESTestCase { + + public void testGetEntitlementsThrowsOnMissingPluginUnnamedModule() { + var policyManager = new PolicyManager( + createEmptyTestServerPolicy(), + Map.of("plugin1", createPluginPolicy("plugin.module")), + c -> "plugin1" + ); + + // Any class from the current module (unnamed) will do + var callerClass = this.getClass(); + var requestingModule = callerClass.getModule(); + + var ex = assertThrows( + "No policy for the unnamed module", + NotEntitledException.class, + () -> policyManager.getEntitlementsOrThrow(callerClass, requestingModule) + ); + + assertEquals( + "Missing entitlement policy: caller [class org.elasticsearch.entitlement.runtime.policy.PolicyManagerTests], module [null]", + ex.getMessage() + ); + assertThat(policyManager.moduleEntitlementsMap, hasEntry(requestingModule, PolicyManager.ModuleEntitlements.NONE)); + } + + public void testGetEntitlementsThrowsOnMissingPolicyForPlugin() { + var policyManager = new PolicyManager(createEmptyTestServerPolicy(), Map.of(), c -> "plugin1"); + + // Any class from the current module (unnamed) will do + var callerClass = this.getClass(); + var requestingModule = callerClass.getModule(); + + var ex = assertThrows( + "No policy for this plugin", + NotEntitledException.class, + () -> policyManager.getEntitlementsOrThrow(callerClass, requestingModule) + ); + + assertEquals( + "Missing entitlement policy: caller [class org.elasticsearch.entitlement.runtime.policy.PolicyManagerTests], module [null]", + ex.getMessage() + ); + assertThat(policyManager.moduleEntitlementsMap, hasEntry(requestingModule, PolicyManager.ModuleEntitlements.NONE)); + } + + public void testGetEntitlementsFailureIsCached() { + var policyManager = new PolicyManager(createEmptyTestServerPolicy(), Map.of(), c -> "plugin1"); + + // Any class from the current module (unnamed) will do + var callerClass = this.getClass(); + var requestingModule = callerClass.getModule(); + + assertThrows(NotEntitledException.class, () -> policyManager.getEntitlementsOrThrow(callerClass, requestingModule)); + assertThat(policyManager.moduleEntitlementsMap, hasEntry(requestingModule, PolicyManager.ModuleEntitlements.NONE)); + + // A second time + var ex = assertThrows(NotEntitledException.class, () -> policyManager.getEntitlementsOrThrow(callerClass, requestingModule)); + + assertThat(ex.getMessage(), endsWith("[CACHED]")); + // Nothing new in the map + 
assertThat(policyManager.moduleEntitlementsMap, aMapWithSize(1)); + } + + public void testGetEntitlementsReturnsEntitlementsForPluginUnnamedModule() { + var policyManager = new PolicyManager( + createEmptyTestServerPolicy(), + Map.ofEntries(entry("plugin2", createPluginPolicy(ALL_UNNAMED))), + c -> "plugin2" + ); + + // Any class from the current module (unnamed) will do + var callerClass = this.getClass(); + var requestingModule = callerClass.getModule(); + + var entitlements = policyManager.getEntitlementsOrThrow(callerClass, requestingModule); + assertThat(entitlements.hasEntitlement(CreateClassLoaderEntitlement.class), is(true)); + } + + public void testGetEntitlementsThrowsOnMissingPolicyForServer() throws ClassNotFoundException { + var policyManager = new PolicyManager(createTestServerPolicy("example"), Map.of(), c -> null); + + // Tests do not run modular, so we cannot use a server class. + // But we know that in production code the server module and its classes are in the boot layer. + // So we use a random module in the boot layer, and a random class from that module (not java.base -- it is + // loaded too early) to mimic a class that would be in the server module. + var mockServerClass = ModuleLayer.boot().findLoader("jdk.httpserver").loadClass("com.sun.net.httpserver.HttpServer"); + var requestingModule = mockServerClass.getModule(); + + var ex = assertThrows( + "No policy for this module in server", + NotEntitledException.class, + () -> policyManager.getEntitlementsOrThrow(mockServerClass, requestingModule) + ); + + assertEquals( + "Missing entitlement policy: caller [class com.sun.net.httpserver.HttpServer], module [jdk.httpserver]", + ex.getMessage() + ); + assertThat(policyManager.moduleEntitlementsMap, hasEntry(requestingModule, PolicyManager.ModuleEntitlements.NONE)); + } + + public void testGetEntitlementsReturnsEntitlementsForServerModule() throws ClassNotFoundException { + var policyManager = new PolicyManager(createTestServerPolicy("jdk.httpserver"), Map.of(), c -> null); + + // Tests do not run modular, so we cannot use a server class. + // But we know that in production code the server module and its classes are in the boot layer. + // So we use a random module in the boot layer, and a random class from that module (not java.base -- it is + // loaded too early) to mimic a class that would be in the server module. 
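+ // PolicyManager.isServerModule treats any named module in the boot layer as a server module,
+ // which is why jdk.httpserver can stand in for org.elasticsearch.server in these tests.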
+ var mockServerClass = ModuleLayer.boot().findLoader("jdk.httpserver").loadClass("com.sun.net.httpserver.HttpServer"); + var requestingModule = mockServerClass.getModule(); + + var entitlements = policyManager.getEntitlementsOrThrow(mockServerClass, requestingModule); + assertThat(entitlements.hasEntitlement(CreateClassLoaderEntitlement.class), is(true)); + assertThat(entitlements.hasEntitlement(ExitVMEntitlement.class), is(true)); + } + + public void testGetEntitlementsReturnsEntitlementsForPluginModule() throws IOException, ClassNotFoundException { + final Path home = createTempDir(); + + Path jar = creteMockPluginJar(home); + + var policyManager = new PolicyManager( + createEmptyTestServerPolicy(), + Map.of("mock-plugin", createPluginPolicy("org.example.plugin")), + c -> "mock-plugin" + ); + + var layer = createLayerForJar(jar, "org.example.plugin"); + var mockPluginClass = layer.findLoader("org.example.plugin").loadClass("q.B"); + var requestingModule = mockPluginClass.getModule(); + + var entitlements = policyManager.getEntitlementsOrThrow(mockPluginClass, requestingModule); + assertThat(entitlements.hasEntitlement(CreateClassLoaderEntitlement.class), is(true)); + assertThat( + entitlements.getEntitlements(FileEntitlement.class).toList(), + contains(transformedMatch(FileEntitlement::toString, containsString("/test/path"))) + ); + } + + public void testGetEntitlementsResultIsCached() { + var policyManager = new PolicyManager( + createEmptyTestServerPolicy(), + Map.ofEntries(entry("plugin2", createPluginPolicy(ALL_UNNAMED))), + c -> "plugin2" + ); + + // Any class from the current module (unnamed) will do + var callerClass = this.getClass(); + var requestingModule = callerClass.getModule(); + + var entitlements = policyManager.getEntitlementsOrThrow(callerClass, requestingModule); + assertThat(entitlements.hasEntitlement(CreateClassLoaderEntitlement.class), is(true)); + assertThat(policyManager.moduleEntitlementsMap, aMapWithSize(1)); + var cachedResult = policyManager.moduleEntitlementsMap.values().stream().findFirst().get(); + var entitlementsAgain = policyManager.getEntitlementsOrThrow(callerClass, requestingModule); + + // Nothing new in the map + assertThat(policyManager.moduleEntitlementsMap, aMapWithSize(1)); + assertThat(entitlementsAgain, sameInstance(cachedResult)); + } + + private static Policy createEmptyTestServerPolicy() { + return new Policy("server", List.of()); + } + + private static Policy createTestServerPolicy(String scopeName) { + return new Policy("server", List.of(new Scope(scopeName, List.of(new ExitVMEntitlement(), new CreateClassLoaderEntitlement())))); + } + + private static Policy createPluginPolicy(String... 
pluginModules) { + return new Policy( + "plugin", + Arrays.stream(pluginModules) + .map( + name -> new Scope( + name, + List.of(new FileEntitlement("/test/path", List.of(FileEntitlement.READ)), new CreateClassLoaderEntitlement()) + ) + ) + .toList() + ); + } + + private static Path creteMockPluginJar(Path home) throws IOException { + Path jar = home.resolve("mock-plugin.jar"); + + Map sources = Map.ofEntries( + entry("module-info", "module org.example.plugin { exports q; }"), + entry("q.B", "package q; public class B { }") + ); + + var classToBytes = InMemoryJavaCompiler.compile(sources); + JarUtils.createJarWithEntries( + jar, + Map.ofEntries(entry("module-info.class", classToBytes.get("module-info")), entry("q/B.class", classToBytes.get("q.B"))) + ); + return jar; + } + + private static ModuleLayer createLayerForJar(Path jar, String moduleName) { + Configuration cf = ModuleLayer.boot().configuration().resolve(ModuleFinder.of(jar), ModuleFinder.of(), Set.of(moduleName)); + var moduleController = ModuleLayer.defineModulesWithOneLoader( + cf, + List.of(ModuleLayer.boot()), + ClassLoader.getPlatformClassLoader() + ); + return moduleController.layer(); + } +} diff --git a/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyParserFailureTests.java b/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyParserFailureTests.java index de8280ea87fe5..7eb2b1fb476b3 100644 --- a/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyParserFailureTests.java +++ b/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyParserFailureTests.java @@ -12,7 +12,6 @@ import org.elasticsearch.test.ESTestCase; import java.io.ByteArrayInputStream; -import java.io.IOException; import java.nio.charset.StandardCharsets; public class PolicyParserFailureTests extends ESTestCase { @@ -26,7 +25,7 @@ public void testParserSyntaxFailures() { assertEquals("[1:1] policy parsing error for [test-failure-policy.yaml]: expected object ", ppe.getMessage()); } - public void testEntitlementDoesNotExist() throws IOException { + public void testEntitlementDoesNotExist() { PolicyParserException ppe = expectThrows(PolicyParserException.class, () -> new PolicyParser(new ByteArrayInputStream(""" entitlement-module-name: - does_not_exist: {} @@ -38,7 +37,7 @@ public void testEntitlementDoesNotExist() throws IOException { ); } - public void testEntitlementMissingParameter() throws IOException { + public void testEntitlementMissingParameter() { PolicyParserException ppe = expectThrows(PolicyParserException.class, () -> new PolicyParser(new ByteArrayInputStream(""" entitlement-module-name: - file: {} @@ -61,7 +60,7 @@ public void testEntitlementMissingParameter() throws IOException { ); } - public void testEntitlementExtraneousParameter() throws IOException { + public void testEntitlementExtraneousParameter() { PolicyParserException ppe = expectThrows(PolicyParserException.class, () -> new PolicyParser(new ByteArrayInputStream(""" entitlement-module-name: - file: diff --git a/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyParserTests.java b/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyParserTests.java index 40016b2e3027e..a514cfe418895 100644 --- a/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyParserTests.java +++ b/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyParserTests.java @@ -11,11 
+11,31 @@ import org.elasticsearch.test.ESTestCase; +import java.io.ByteArrayInputStream; import java.io.IOException; +import java.nio.charset.StandardCharsets; import java.util.List; +import static org.elasticsearch.test.LambdaMatchers.transformedMatch; +import static org.hamcrest.Matchers.both; +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; + public class PolicyParserTests extends ESTestCase { + private static class TestWrongEntitlementName implements Entitlement {} + + public void testGetEntitlementTypeName() { + assertEquals("create_class_loader", PolicyParser.getEntitlementTypeName(CreateClassLoaderEntitlement.class)); + + var ex = expectThrows(IllegalArgumentException.class, () -> PolicyParser.getEntitlementTypeName(TestWrongEntitlementName.class)); + assertThat( + ex.getMessage(), + equalTo("TestWrongEntitlementName is not a valid Entitlement class name. A valid class name must end with 'Entitlement'") + ); + } + public void testPolicyBuilder() throws IOException { Policy parsedPolicy = new PolicyParser(PolicyParserTests.class.getResourceAsStream("test-policy.yaml"), "test-policy.yaml") .parsePolicy(); @@ -25,4 +45,23 @@ public void testPolicyBuilder() throws IOException { ); assertEquals(parsedPolicy, builtPolicy); } + + public void testParseCreateClassloader() throws IOException { + Policy parsedPolicy = new PolicyParser(new ByteArrayInputStream(""" + entitlement-module-name: + - create_class_loader + """.getBytes(StandardCharsets.UTF_8)), "test-policy.yaml").parsePolicy(); + Policy builtPolicy = new Policy( + "test-policy.yaml", + List.of(new Scope("entitlement-module-name", List.of(new CreateClassLoaderEntitlement()))) + ); + assertThat( + parsedPolicy.scopes, + contains( + both(transformedMatch((Scope scope) -> scope.name, equalTo("entitlement-module-name"))).and( + transformedMatch(scope -> scope.entitlements, contains(instanceOf(CreateClassLoaderEntitlement.class))) + ) + ) + ); + } } diff --git a/libs/entitlement/tools/securitymanager-scanner/build.gradle b/libs/entitlement/tools/securitymanager-scanner/build.gradle index 8d035c9e847c6..ebb671e5487ef 100644 --- a/libs/entitlement/tools/securitymanager-scanner/build.gradle +++ b/libs/entitlement/tools/securitymanager-scanner/build.gradle @@ -47,8 +47,8 @@ repositories { dependencies { compileOnly(project(':libs:core')) - implementation 'org.ow2.asm:asm:9.7' - implementation 'org.ow2.asm:asm-util:9.7' + implementation 'org.ow2.asm:asm:9.7.1' + implementation 'org.ow2.asm:asm-util:9.7.1' implementation(project(':libs:entitlement:tools:common')) } diff --git a/libs/geo/src/main/java/org/elasticsearch/geometry/utils/SpatialEnvelopeVisitor.java b/libs/geo/src/main/java/org/elasticsearch/geometry/utils/SpatialEnvelopeVisitor.java new file mode 100644 index 0000000000000..eee4a62c7d588 --- /dev/null +++ b/libs/geo/src/main/java/org/elasticsearch/geometry/utils/SpatialEnvelopeVisitor.java @@ -0,0 +1,356 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
*/ + +package org.elasticsearch.geometry.utils; + +import org.elasticsearch.geometry.Circle; +import org.elasticsearch.geometry.Geometry; +import org.elasticsearch.geometry.GeometryCollection; +import org.elasticsearch.geometry.GeometryVisitor; +import org.elasticsearch.geometry.Line; +import org.elasticsearch.geometry.LinearRing; +import org.elasticsearch.geometry.MultiLine; +import org.elasticsearch.geometry.MultiPoint; +import org.elasticsearch.geometry.MultiPolygon; +import org.elasticsearch.geometry.Point; +import org.elasticsearch.geometry.Polygon; +import org.elasticsearch.geometry.Rectangle; + +import java.util.Locale; +import java.util.Optional; + +/** + * This visitor is designed to determine the spatial envelope (or BBOX or MBR) of a potentially complex geometry. + * It has two modes: + * <ul> + * <li>Cartesian mode: The envelope is determined by the minimum and maximum x/y coordinates. Incoming BBOX geometries with minX > maxX are treated as invalid. Resulting BBOX geometries will always have minX <= maxX.</li> + * <li>Geographic mode: The envelope is determined by the minimum and maximum x/y coordinates, considering the possibility of wrapping the longitude around the dateline. A bounding box can be determined either by wrapping the longitude around the dateline or not, and the smaller bounding box is chosen. It is possible to disable the wrapping of the longitude.</li> + * </ul>
+ * Usage of this is as simple as: + * + * Optional<Rectangle> bbox = SpatialEnvelopeVisitor.visit(geometry); + * if (bbox.isPresent()) { + * Rectangle envelope = bbox.get(); + * // Do stuff with the envelope + * } + * + * It is also possible to create the inner PointVisitor separately, as well as use the visitor for multiple geometries. + * + * PointVisitor pointVisitor = new CartesianPointVisitor(); + * SpatialEnvelopeVisitor visitor = new SpatialEnvelopeVisitor(pointVisitor); + * for (Geometry geometry : geometries) { + * geometry.visit(visitor); + * } + * if (visitor.isValid()) { + * Rectangle envelope = visitor.getResult(); + * // Do stuff with the envelope + * } + * + * Code that wishes to modify the behaviour of the visitor can implement the PointVisitor interface, + * or extend the existing implementations. + */ +public class SpatialEnvelopeVisitor implements GeometryVisitor { + + private final PointVisitor pointVisitor; + + public SpatialEnvelopeVisitor(PointVisitor pointVisitor) { + this.pointVisitor = pointVisitor; + } + + /** + * Determine the BBOX without considering the CRS or wrapping of the longitude. + * Note that incoming BBOX's that do cross the dateline (minx>maxx) will be treated as invalid. + */ + public static Optional visitCartesian(Geometry geometry) { + var visitor = new SpatialEnvelopeVisitor(new CartesianPointVisitor()); + if (geometry.visit(visitor)) { + return Optional.of(visitor.getResult()); + } + return Optional.empty(); + } + + /** + * Determine the BBOX assuming the CRS is geographic (eg WGS84) and optionally wrapping the longitude around the dateline. + */ + public static Optional visitGeo(Geometry geometry, boolean wrapLongitude) { + var visitor = new SpatialEnvelopeVisitor(new GeoPointVisitor(wrapLongitude)); + if (geometry.visit(visitor)) { + return Optional.of(visitor.getResult()); + } + return Optional.empty(); + } + + public Rectangle getResult() { + return pointVisitor.getResult(); + } + + /** + * Visitor for visiting points and rectangles. This is where the actual envelope calculation happens. + * There are two implementations, one for cartesian coordinates and one for geographic coordinates. + * The latter can optionally wrap the longitude around the dateline. + */ + public interface PointVisitor { + void visitPoint(double x, double y); + + void visitRectangle(double minX, double maxX, double maxY, double minY); + + boolean isValid(); + + Rectangle getResult(); + } + + /** + * The cartesian point visitor determines the envelope by the minimum and maximum x/y coordinates. + * It also disallows invalid rectangles where minX > maxX. 
+ */ + public static class CartesianPointVisitor implements PointVisitor { + private double minX = Double.POSITIVE_INFINITY; + private double minY = Double.POSITIVE_INFINITY; + private double maxX = Double.NEGATIVE_INFINITY; + private double maxY = Double.NEGATIVE_INFINITY; + + public double getMinX() { + return minX; + } + + public double getMinY() { + return minY; + } + + public double getMaxX() { + return maxX; + } + + public double getMaxY() { + return maxY; + } + + @Override + public void visitPoint(double x, double y) { + minX = Math.min(minX, x); + minY = Math.min(minY, y); + maxX = Math.max(maxX, x); + maxY = Math.max(maxY, y); + } + + @Override + public void visitRectangle(double minX, double maxX, double maxY, double minY) { + if (minX > maxX) { + throw new IllegalArgumentException( + String.format(Locale.ROOT, "Invalid cartesian rectangle: minX (%s) > maxX (%s)", minX, maxX) + ); + } + this.minX = Math.min(this.minX, minX); + this.minY = Math.min(this.minY, minY); + this.maxX = Math.max(this.maxX, maxX); + this.maxY = Math.max(this.maxY, maxY); + } + + @Override + public boolean isValid() { + return minY != Double.POSITIVE_INFINITY; + } + + @Override + public Rectangle getResult() { + return new Rectangle(minX, maxX, maxY, minY); + } + } + + /** + * The geographic point visitor determines the envelope by the minimum and maximum x/y coordinates, + * while allowing for wrapping the longitude around the dateline. + * When longitude wrapping is enabled, the visitor will determine the smallest bounding box between the two choices: + *
+ * <ul> + * <li>Wrapping around the front of the earth, in which case the result will have minx < maxx</li> + * <li>Wrapping around the back of the earth, crossing the dateline, in which case the result will have minx > maxx</li> + * </ul> + * For example, for points at longitudes -170 and 170 the unwrapped width is 340 while the wrapped width is only 20, so the dateline-crossing box (minx 170, maxx -170) is chosen.
+ */ + public static class GeoPointVisitor implements PointVisitor { + private double minY = Double.POSITIVE_INFINITY; + private double maxY = Double.NEGATIVE_INFINITY; + private double minNegX = Double.POSITIVE_INFINITY; + private double maxNegX = Double.NEGATIVE_INFINITY; + private double minPosX = Double.POSITIVE_INFINITY; + private double maxPosX = Double.NEGATIVE_INFINITY; + + public double getMinY() { + return minY; + } + + public double getMaxY() { + return maxY; + } + + public double getMinNegX() { + return minNegX; + } + + public double getMaxNegX() { + return maxNegX; + } + + public double getMinPosX() { + return minPosX; + } + + public double getMaxPosX() { + return maxPosX; + } + + private final boolean wrapLongitude; + + public GeoPointVisitor(boolean wrapLongitude) { + this.wrapLongitude = wrapLongitude; + } + + @Override + public void visitPoint(double x, double y) { + minY = Math.min(minY, y); + maxY = Math.max(maxY, y); + visitLongitude(x); + } + + @Override + public void visitRectangle(double minX, double maxX, double maxY, double minY) { + this.minY = Math.min(this.minY, minY); + this.maxY = Math.max(this.maxY, maxY); + visitLongitude(minX); + visitLongitude(maxX); + } + + private void visitLongitude(double x) { + if (x >= 0) { + minPosX = Math.min(minPosX, x); + maxPosX = Math.max(maxPosX, x); + } else { + minNegX = Math.min(minNegX, x); + maxNegX = Math.max(maxNegX, x); + } + } + + @Override + public boolean isValid() { + return minY != Double.POSITIVE_INFINITY; + } + + @Override + public Rectangle getResult() { + return getResult(minNegX, minPosX, maxNegX, maxPosX, maxY, minY, wrapLongitude); + } + + private static Rectangle getResult( + double minNegX, + double minPosX, + double maxNegX, + double maxPosX, + double maxY, + double minY, + boolean wrapLongitude + ) { + assert Double.isFinite(maxY); + if (Double.isInfinite(minPosX)) { + return new Rectangle(minNegX, maxNegX, maxY, minY); + } else if (Double.isInfinite(minNegX)) { + return new Rectangle(minPosX, maxPosX, maxY, minY); + } else if (wrapLongitude) { + double unwrappedWidth = maxPosX - minNegX; + double wrappedWidth = (180 - minPosX) - (-180 - maxNegX); + if (unwrappedWidth <= wrappedWidth) { + return new Rectangle(minNegX, maxPosX, maxY, minY); + } else { + return new Rectangle(minPosX, maxNegX, maxY, minY); + } + } else { + return new Rectangle(minNegX, maxPosX, maxY, minY); + } + } + } + + private boolean isValid() { + return pointVisitor.isValid(); + } + + @Override + public Boolean visit(Circle circle) throws RuntimeException { + // TODO: Support circle, if given CRS (needed for radius to x/y coordinate transformation) + throw new UnsupportedOperationException("Circle is not supported"); + } + + @Override + public Boolean visit(GeometryCollection collection) throws RuntimeException { + collection.forEach(geometry -> geometry.visit(this)); + return isValid(); + } + + @Override + public Boolean visit(Line line) throws RuntimeException { + for (int i = 0; i < line.length(); i++) { + pointVisitor.visitPoint(line.getX(i), line.getY(i)); + } + return isValid(); + } + + @Override + public Boolean visit(LinearRing ring) throws RuntimeException { + for (int i = 0; i < ring.length(); i++) { + pointVisitor.visitPoint(ring.getX(i), ring.getY(i)); + } + return isValid(); + } + + @Override + public Boolean visit(MultiLine multiLine) throws RuntimeException { + multiLine.forEach(line -> line.visit(this)); + return isValid(); + } + + @Override + public Boolean visit(MultiPoint multiPoint) throws RuntimeException { + for 
(int i = 0; i < multiPoint.size(); i++) { + visit(multiPoint.get(i)); + } + return isValid(); + } + + @Override + public Boolean visit(MultiPolygon multiPolygon) throws RuntimeException { + multiPolygon.forEach(polygon -> polygon.visit(this)); + return isValid(); + } + + @Override + public Boolean visit(Point point) throws RuntimeException { + pointVisitor.visitPoint(point.getX(), point.getY()); + return isValid(); + } + + @Override + public Boolean visit(Polygon polygon) throws RuntimeException { + visit(polygon.getPolygon()); + for (int i = 0; i < polygon.getNumberOfHoles(); i++) { + visit(polygon.getHole(i)); + } + return isValid(); + } + + @Override + public Boolean visit(Rectangle rectangle) throws RuntimeException { + pointVisitor.visitRectangle(rectangle.getMinX(), rectangle.getMaxX(), rectangle.getMaxY(), rectangle.getMinY()); + return isValid(); + } +} diff --git a/libs/geo/src/test/java/org/elasticsearch/geometry/utils/SpatialEnvelopeVisitorTests.java b/libs/geo/src/test/java/org/elasticsearch/geometry/utils/SpatialEnvelopeVisitorTests.java new file mode 100644 index 0000000000000..fc35df295e566 --- /dev/null +++ b/libs/geo/src/test/java/org/elasticsearch/geometry/utils/SpatialEnvelopeVisitorTests.java @@ -0,0 +1,194 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.geometry.utils; + +import org.elasticsearch.geo.GeometryTestUtils; +import org.elasticsearch.geo.ShapeTestUtils; +import org.elasticsearch.geometry.Point; +import org.elasticsearch.geometry.Rectangle; +import org.elasticsearch.test.ESTestCase; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.lessThanOrEqualTo; + +public class SpatialEnvelopeVisitorTests extends ESTestCase { + + public void testVisitCartesianShape() { + for (int i = 0; i < 1000; i++) { + var geometry = ShapeTestUtils.randomGeometryWithoutCircle(0, false); + var bbox = SpatialEnvelopeVisitor.visitCartesian(geometry); + assertNotNull(bbox); + assertTrue(i + ": " + geometry, bbox.isPresent()); + var result = bbox.get(); + assertThat(i + ": " + geometry, result.getMinX(), lessThanOrEqualTo(result.getMaxX())); + assertThat(i + ": " + geometry, result.getMinY(), lessThanOrEqualTo(result.getMaxY())); + } + } + + public void testVisitGeoShapeNoWrap() { + for (int i = 0; i < 1000; i++) { + var geometry = GeometryTestUtils.randomGeometryWithoutCircle(0, false); + var bbox = SpatialEnvelopeVisitor.visitGeo(geometry, false); + assertNotNull(bbox); + assertTrue(i + ": " + geometry, bbox.isPresent()); + var result = bbox.get(); + assertThat(i + ": " + geometry, result.getMinX(), lessThanOrEqualTo(result.getMaxX())); + assertThat(i + ": " + geometry, result.getMinY(), lessThanOrEqualTo(result.getMaxY())); + } + } + + public void testVisitGeoShapeWrap() { + for (int i = 0; i < 1000; i++) { + var geometry = GeometryTestUtils.randomGeometryWithoutCircle(0, true); + var bbox = SpatialEnvelopeVisitor.visitGeo(geometry, false); + assertNotNull(bbox); + assertTrue(i + ": " + geometry, bbox.isPresent()); + var result = bbox.get(); + 
assertThat(i + ": " + geometry, result.getMinX(), lessThanOrEqualTo(result.getMaxX())); + assertThat(i + ": " + geometry, result.getMinY(), lessThanOrEqualTo(result.getMaxY())); + } + } + + public void testVisitCartesianPoints() { + var visitor = new SpatialEnvelopeVisitor(new SpatialEnvelopeVisitor.CartesianPointVisitor()); + double minX = Double.MAX_VALUE; + double minY = Double.MAX_VALUE; + double maxX = -Double.MAX_VALUE; + double maxY = -Double.MAX_VALUE; + for (int i = 0; i < 1000; i++) { + var x = randomFloat(); + var y = randomFloat(); + var point = new Point(x, y); + visitor.visit(point); + minX = Math.min(minX, x); + minY = Math.min(minY, y); + maxX = Math.max(maxX, x); + maxY = Math.max(maxY, y); + var result = visitor.getResult(); + assertThat(i + ": " + point, result.getMinX(), equalTo(minX)); + assertThat(i + ": " + point, result.getMinY(), equalTo(minY)); + assertThat(i + ": " + point, result.getMaxX(), equalTo(maxX)); + assertThat(i + ": " + point, result.getMaxY(), equalTo(maxY)); + } + } + + public void testVisitGeoPointsNoWrapping() { + var visitor = new SpatialEnvelopeVisitor(new SpatialEnvelopeVisitor.GeoPointVisitor(false)); + double minY = Double.MAX_VALUE; + double maxY = -Double.MAX_VALUE; + double minX = Double.MAX_VALUE; + double maxX = -Double.MAX_VALUE; + for (int i = 0; i < 1000; i++) { + var point = GeometryTestUtils.randomPoint(); + visitor.visit(point); + minY = Math.min(minY, point.getY()); + maxY = Math.max(maxY, point.getY()); + minX = Math.min(minX, point.getX()); + maxX = Math.max(maxX, point.getX()); + var result = visitor.getResult(); + assertThat(i + ": " + point, result.getMinX(), lessThanOrEqualTo(result.getMaxX())); + assertThat(i + ": " + point, result.getMinX(), equalTo(minX)); + assertThat(i + ": " + point, result.getMinY(), equalTo(minY)); + assertThat(i + ": " + point, result.getMaxX(), equalTo(maxX)); + assertThat(i + ": " + point, result.getMaxY(), equalTo(maxY)); + } + } + + public void testVisitGeoPointsWrapping() { + var visitor = new SpatialEnvelopeVisitor(new SpatialEnvelopeVisitor.GeoPointVisitor(true)); + double minY = Double.POSITIVE_INFINITY; + double maxY = Double.NEGATIVE_INFINITY; + double minNegX = Double.POSITIVE_INFINITY; + double maxNegX = Double.NEGATIVE_INFINITY; + double minPosX = Double.POSITIVE_INFINITY; + double maxPosX = Double.NEGATIVE_INFINITY; + for (int i = 0; i < 1000; i++) { + var point = GeometryTestUtils.randomPoint(); + visitor.visit(point); + minY = Math.min(minY, point.getY()); + maxY = Math.max(maxY, point.getY()); + if (point.getX() >= 0) { + minPosX = Math.min(minPosX, point.getX()); + maxPosX = Math.max(maxPosX, point.getX()); + } else { + minNegX = Math.min(minNegX, point.getX()); + maxNegX = Math.max(maxNegX, point.getX()); + } + var result = visitor.getResult(); + if (Double.isInfinite(minPosX)) { + // Only negative x values were considered + assertRectangleResult(i + ": " + point, result, minNegX, maxNegX, maxY, minY, false); + } else if (Double.isInfinite(minNegX)) { + // Only positive x values were considered + assertRectangleResult(i + ": " + point, result, minPosX, maxPosX, maxY, minY, false); + } else { + // Both positive and negative x values exist, we need to decide which way to wrap the bbox + double unwrappedWidth = maxPosX - minNegX; + double wrappedWidth = (180 - minPosX) - (-180 - maxNegX); + if (unwrappedWidth <= wrappedWidth) { + // The smaller bbox is around the front of the planet, no dateline wrapping required + assertRectangleResult(i + ": " + point, result, minNegX, maxPosX, 
maxY, minY, false); + } else { + // The smaller bbox is around the back of the planet, dateline wrapping required (minx > maxx) + assertRectangleResult(i + ": " + point, result, minPosX, maxNegX, maxY, minY, true); + } + } + } + } + + public void testWillCrossDateline() { + var visitor = new SpatialEnvelopeVisitor(new SpatialEnvelopeVisitor.GeoPointVisitor(true)); + visitor.visit(new Point(-90.0, 0.0)); + visitor.visit(new Point(90.0, 0.0)); + assertCrossesDateline(visitor, false); + visitor.visit(new Point(-89.0, 0.0)); + visitor.visit(new Point(89.0, 0.0)); + assertCrossesDateline(visitor, false); + visitor.visit(new Point(-100.0, 0.0)); + visitor.visit(new Point(100.0, 0.0)); + assertCrossesDateline(visitor, true); + visitor.visit(new Point(-70.0, 0.0)); + visitor.visit(new Point(70.0, 0.0)); + assertCrossesDateline(visitor, false); + visitor.visit(new Point(-120.0, 0.0)); + visitor.visit(new Point(120.0, 0.0)); + assertCrossesDateline(visitor, true); + } + + private void assertCrossesDateline(SpatialEnvelopeVisitor visitor, boolean crossesDateline) { + var result = visitor.getResult(); + if (crossesDateline) { + assertThat("Crosses dateline, minx>maxx", result.getMinX(), greaterThanOrEqualTo(result.getMaxX())); + } else { + assertThat("Does not cross dateline, minx<=maxx", result.getMinX(), lessThanOrEqualTo(result.getMaxX())); + } + } +} List<String> asArray = settings.getAsList("ignored_scripts"); Set<String> scripts = new HashSet<>(Arrays.asList("han", "hiragana", "katakana", "hangul")); diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CJKWidthFilterFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CJKWidthFilterFactory.java index 07e28c5a4924f..717bfc1191ab2 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CJKWidthFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CJKWidthFilterFactory.java @@ -20,7 +20,7 @@ public final class CJKWidthFilterFactory extends AbstractTokenFilterFactory implements NormalizingTokenFilterFactory { CJKWidthFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) { - super(name, settings); + super(name); } @Override diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CatalanAnalyzerProvider.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CatalanAnalyzerProvider.java index 2be6f220e4441..b134a1d2ab0fb 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CatalanAnalyzerProvider.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CatalanAnalyzerProvider.java @@ -22,7 +22,7 @@ public class CatalanAnalyzerProvider extends AbstractIndexAnalyzerProvider<CatalanAnalyzer> arrayKeepTypes = settings.getAsList(KEEP_TYPES_KEY, null); if ((arrayKeepTypes == null)) { diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/KeepWordFilterFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/KeepWordFilterFactory.java index 0fa763d627a7a..e2be82df10600 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/KeepWordFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/KeepWordFilterFactory.java @@ -51,7 +51,7 @@ public class KeepWordFilterFactory extends AbstractTokenFilterFactory { private static final String ENABLE_POS_INC_KEY = "enable_position_increments"; KeepWordFilterFactory(IndexSettings indexSettings, Environment env, String name,
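// Aside (illustration only, not part of this patch): the wrapping tests above choose the narrower of
// the two candidate longitude spans; in sketch form, with minPosX/maxPosX tracking positive and
// minNegX/maxNegX tracking negative longitudes seen so far:
//   double unwrapped = maxPosX - minNegX;               // span through the prime meridian
//   double wrapped = (180 - minPosX) + (maxNegX + 180); // span through the dateline
//   boolean crossesDateline = wrapped < unwrapped;      // if true, the bbox reports minX > maxX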
Settings settings) { - super(name, settings); + super(name); final List arrayKeepWords = settings.getAsList(KEEP_WORDS_KEY, null); final String keepWordsPath = settings.get(KEEP_WORDS_PATH_KEY, null); diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/KeywordAnalyzerProvider.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/KeywordAnalyzerProvider.java index c43327cf508cc..c4cd39c4f441f 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/KeywordAnalyzerProvider.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/KeywordAnalyzerProvider.java @@ -20,7 +20,7 @@ public class KeywordAnalyzerProvider extends AbstractIndexAnalyzerProvider regexes = settings.getAsList(PATTERNS_KEY, null, false); if (regexes == null) { throw new IllegalArgumentException("required setting '" + PATTERNS_KEY + "' is missing for token filter [" + name + "]"); diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/PatternReplaceTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/PatternReplaceTokenFilterFactory.java index cd15b05aabfbe..d90b7a182cab3 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/PatternReplaceTokenFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/PatternReplaceTokenFilterFactory.java @@ -27,7 +27,7 @@ public class PatternReplaceTokenFilterFactory extends AbstractTokenFilterFactory private final boolean all; public PatternReplaceTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { - super(name, settings); + super(name); String sPattern = settings.get("pattern", null); if (sPattern == null) { diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/PatternTokenizerFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/PatternTokenizerFactory.java index b8f2e194c2ca0..311d63f4d011a 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/PatternTokenizerFactory.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/PatternTokenizerFactory.java @@ -25,7 +25,7 @@ public class PatternTokenizerFactory extends AbstractTokenizerFactory { private final int group; PatternTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { - super(indexSettings, settings, name); + super(name); String sPattern = settings.get("pattern", "\\W+" /*PatternAnalyzer.NON_WORD_PATTERN*/); if (sPattern == null) { diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/PersianAnalyzerProvider.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/PersianAnalyzerProvider.java index 917a45188123c..b4cb8b8003094 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/PersianAnalyzerProvider.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/PersianAnalyzerProvider.java @@ -35,7 +35,7 @@ public class PersianAnalyzerProvider extends AbstractIndexAnalyzerProvider filterNames; ScriptedConditionTokenFilterFactory(IndexSettings indexSettings, String name, Settings settings, ScriptService scriptService) { - super(name, settings); + super(name); Settings scriptSettings = settings.getAsSettings("script"); Script script = 
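// Aside (pattern note, not part of this patch): every factory constructor in this module now calls
// super(name) instead of super(name, settings), apparently because the base classes no longer inspect
// the settings; that would also explain why the assertWarnings(...) lines about the deprecated
// [version] setting are removed from CompoundAnalysisTests further down.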
Script.parse(scriptSettings); diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/SerbianAnalyzerProvider.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/SerbianAnalyzerProvider.java index 6dc899be95875..e36aaa9756e70 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/SerbianAnalyzerProvider.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/SerbianAnalyzerProvider.java @@ -22,7 +22,7 @@ public class SerbianAnalyzerProvider extends AbstractIndexAnalyzerProvider rules = Analysis.getWordList(env, settings, "rules"); if (rules == null) { diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/StemmerTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/StemmerTokenFilterFactory.java index 7548c8ad2b88b..a6e9fccd9d09c 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/StemmerTokenFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/StemmerTokenFilterFactory.java @@ -92,7 +92,7 @@ public class StemmerTokenFilterFactory extends AbstractTokenFilterFactory { private static final DeprecationLogger DEPRECATION_LOGGER = DeprecationLogger.getLogger(StemmerTokenFilterFactory.class); StemmerTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) throws IOException { - super(name, settings); + super(name); this.language = Strings.capitalize(settings.get("language", settings.get("name", "porter"))); // check that we have a valid language by trying to create a TokenStream create(EMPTY_TOKEN_STREAM).close(); diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/StopAnalyzerProvider.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/StopAnalyzerProvider.java index 0977d08d0fd48..983a9410fba2d 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/StopAnalyzerProvider.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/StopAnalyzerProvider.java @@ -23,7 +23,7 @@ public class StopAnalyzerProvider extends AbstractIndexAnalyzerProvider DIGIT diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/WordDelimiterTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/WordDelimiterTokenFilterFactory.java index 083594f6ab02e..d20a837e92bf6 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/WordDelimiterTokenFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/WordDelimiterTokenFilterFactory.java @@ -47,7 +47,7 @@ public class WordDelimiterTokenFilterFactory extends AbstractTokenFilterFactory @SuppressWarnings("HiddenField") public WordDelimiterTokenFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) { - super(name, settings); + super(name); // Sample Format for the type table: // $ => DIGIT diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/XLowerCaseTokenizerFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/XLowerCaseTokenizerFactory.java index 7fc1bf6882354..211278d0ece41 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/XLowerCaseTokenizerFactory.java +++ 
b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/XLowerCaseTokenizerFactory.java @@ -20,7 +20,7 @@ public class XLowerCaseTokenizerFactory extends AbstractTokenizerFactory { public XLowerCaseTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { - super(indexSettings, settings, name); + super(name); } @Override diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CompoundAnalysisTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CompoundAnalysisTests.java index 69dd8e91b52b2..92c134b5c0f56 100644 --- a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CompoundAnalysisTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CompoundAnalysisTests.java @@ -64,7 +64,6 @@ public void testDictionaryDecompounder() throws Exception { hasItems("donau", "dampf", "schiff", "donaudampfschiff", "spargel", "creme", "suppe", "spargelcremesuppe") ); } - assertWarnings("Setting [version] on analysis component [custom7] has no effect and is deprecated"); } public void testHyphenationDecompoundingAnalyzerOnlyLongestMatch() throws Exception { @@ -76,7 +75,6 @@ public void testHyphenationDecompoundingAnalyzerOnlyLongestMatch() throws Except hasItems("kaffeemaschine", "kaffee", "fee", "maschine", "fussballpumpe", "fussball", "ballpumpe", "pumpe") ); } - assertWarnings("Setting [version] on analysis component [custom7] has no effect and is deprecated"); } /** @@ -89,7 +87,6 @@ public void testHyphenationDecompoundingAnalyzerNoSubMatches() throws Exception List terms = analyze(settings, "hyphenationDecompoundingAnalyzerNoSubMatches", "kaffeemaschine fussballpumpe"); MatcherAssert.assertThat(terms, hasItems("kaffeemaschine", "kaffee", "maschine", "fussballpumpe", "fussball", "ballpumpe")); } - assertWarnings("Setting [version] on analysis component [custom7] has no effect and is deprecated"); } /** @@ -102,7 +99,6 @@ public void testHyphenationDecompoundingAnalyzerNoOverlappingMatches() throws Ex List terms = analyze(settings, "hyphenationDecompoundingAnalyzerNoOverlappingMatches", "kaffeemaschine fussballpumpe"); MatcherAssert.assertThat(terms, hasItems("kaffeemaschine", "kaffee", "maschine", "fussballpumpe", "fussball", "pumpe")); } - assertWarnings("Setting [version] on analysis component [custom7] has no effect and is deprecated"); } private List analyze(Settings settings, String analyzerName, String text) throws IOException { diff --git a/modules/data-streams/build.gradle b/modules/data-streams/build.gradle index b017ae9921b0e..8ae56101ef01e 100644 --- a/modules/data-streams/build.gradle +++ b/modules/data-streams/build.gradle @@ -42,4 +42,32 @@ if (buildParams.isSnapshotBuild() == false) { tasks.named("yamlRestCompatTestTransform").configure({ task -> task.skipTest("data_stream/10_basic/Create hidden data stream", "warning does not exist for compatibility") + + // Failure store configuration changed on 8.18 (earlier versions behind feature flag) + task.skipTest("data_stream/10_basic/Create data stream with failure store", "Configuring the failure store via data stream templates is not supported anymore.") + task.skipTest("data_stream/10_basic/Delete data stream with failure store", "Configuring the failure store via data stream templates is not supported anymore.") + task.skipTest("data_stream/10_basic/Delete data stream with failure store uninitialized", "Configuring the failure store via data stream templates is 
not supported anymore.") + + task.skipTest("data_stream/30_auto_create_data_stream/Don't initialize failure store during data stream auto-creation on successful index", "Configuring the failure store via data stream templates is not supported anymore.") + + task.skipTest("data_stream/150_tsdb/TSDB failures go to failure store", "Configuring the failure store via data stream templates is not supported anymore.") + + task.skipTest("data_stream/170_modify_data_stream/Modify a data stream's failure store", "Configuring the failure store via data stream templates is not supported anymore.") + + task.skipTest("data_stream/190_failure_store_redirection/Failure redirects to original failure store during index change if final pipeline changes target", "Configuring the failure store via data stream templates is not supported anymore.") + task.skipTest("data_stream/190_failure_store_redirection/Ensure failure is redirected to correct failure store after a reroute processor", "Configuring the failure store via data stream templates is not supported anymore.") + task.skipTest("data_stream/190_failure_store_redirection/Test failure store status with bulk request", "Configuring the failure store via data stream templates is not supported anymore.") + task.skipTest("data_stream/190_failure_store_redirection/Redirect ingest failure in data stream to failure store", "Configuring the failure store via data stream templates is not supported anymore.") + task.skipTest("data_stream/190_failure_store_redirection/Failure redirects to correct failure store when pipeline loop is detected", "Configuring the failure store via data stream templates is not supported anymore.") + task.skipTest("data_stream/190_failure_store_redirection/Failure redirects to correct failure store when index loop is detected", "Configuring the failure store via data stream templates is not supported anymore.") + task.skipTest("data_stream/190_failure_store_redirection/Failure redirects to original failure store during index change if self referenced", "Configuring the failure store via data stream templates is not supported anymore.") + task.skipTest("data_stream/190_failure_store_redirection/Redirect shard failure in data stream to failure store", "Configuring the failure store via data stream templates is not supported anymore.") + task.skipTest("data_stream/190_failure_store_redirection/Version conflicts are not redirected to failure store", "Configuring the failure store via data stream templates is not supported anymore.") + + task.skipTest("data_stream/200_rollover_failure_store/Lazily roll over a data stream's failure store after an ingest failure", "Configuring the failure store via data stream templates is not supported anymore.") + task.skipTest("data_stream/200_rollover_failure_store/A failure store marked for lazy rollover should only be rolled over when there is a failure", "Configuring the failure store via data stream templates is not supported anymore.") + task.skipTest("data_stream/200_rollover_failure_store/Roll over a data stream's failure store without conditions", "Configuring the failure store via data stream templates is not supported anymore.") + task.skipTest("data_stream/200_rollover_failure_store/Lazily roll over a data stream's failure store after a shard failure", "Configuring the failure store via data stream templates is not supported anymore.") + task.skipTest("data_stream/200_rollover_failure_store/Don't roll over a data stream's failure store when conditions aren't met", "Configuring the failure store via 
data stream templates is not supported anymore.") + task.skipTest("data_stream/200_rollover_failure_store/Roll over a data stream's failure store with conditions", "Configuring the failure store via data stream templates is not supported anymore.") }) diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java index 777ddc28fefdc..0e03045a090f8 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java @@ -67,6 +67,7 @@ import org.elasticsearch.cluster.metadata.DataStreamAction; import org.elasticsearch.cluster.metadata.DataStreamAlias; import org.elasticsearch.cluster.metadata.DataStreamLifecycle; +import org.elasticsearch.cluster.metadata.DataStreamTestHelper; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.IndexMetadataStats; import org.elasticsearch.cluster.metadata.IndexWriteLoad; @@ -2447,9 +2448,10 @@ static void putComposableIndexTemplate( .mappings(mappings == null ? null : CompressedXContent.fromJSON(mappings)) .aliases(aliases) .lifecycle(lifecycle) + .dataStreamOptions(DataStreamTestHelper.createDataStreamOptionsTemplate(withFailureStore)) ) .metadata(metadata) - .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false, withFailureStore)) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) .build() ); client().execute(TransportPutComposableIndexTemplateAction.TYPE, request).actionGet(); diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/FailureStoreMetricsWithIncrementalBulkIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/FailureStoreMetricsWithIncrementalBulkIT.java index 2c9b7417b2832..d27ec04179e1a 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/FailureStoreMetricsWithIncrementalBulkIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/FailureStoreMetricsWithIncrementalBulkIT.java @@ -21,6 +21,7 @@ import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import org.elasticsearch.cluster.metadata.DataStream; +import org.elasticsearch.cluster.metadata.DataStreamTestHelper; import org.elasticsearch.cluster.metadata.Template; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.settings.Settings; @@ -165,8 +166,8 @@ private void createDataStreamWithFailureStore() throws IOException { request.indexTemplate( ComposableIndexTemplate.builder() .indexPatterns(List.of(DATA_STREAM_NAME + "*")) - .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false, true)) - .template(new Template(null, new CompressedXContent(""" + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) + .template(Template.builder().mappings(new CompressedXContent(""" { "dynamic": false, "properties": { @@ -177,7 +178,7 @@ private void createDataStreamWithFailureStore() throws IOException { "type": "long" } } - }"""), null)) + }""")).dataStreamOptions(DataStreamTestHelper.createDataStreamOptionsTemplate(true))) .build() ); assertAcked(safeGet(client().execute(TransportPutComposableIndexTemplateAction.TYPE, 
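// Aside (illustration only, not part of this patch): the recurring Java-side change in these tests
// moves the failure-store flag from the DataStreamTemplate constructor into the template's data
// stream options; in sketch form, using only calls that already appear in this diff:
//   ComposableIndexTemplate.builder()
//       .indexPatterns(List.of("logs-*")) // hypothetical pattern for the sketch
//       .template(Template.builder().dataStreamOptions(DataStreamTestHelper.createDataStreamOptionsTemplate(true)))
//       .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate())
//       .build();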
request))); diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/IngestFailureStoreMetricsIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/IngestFailureStoreMetricsIT.java index 96def04069e24..e9eaf7b5faddb 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/IngestFailureStoreMetricsIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/IngestFailureStoreMetricsIT.java @@ -23,6 +23,7 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; +import org.elasticsearch.cluster.metadata.DataStreamTestHelper; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.Template; import org.elasticsearch.common.compress.CompressedXContent; @@ -283,8 +284,8 @@ private void putComposableIndexTemplate(boolean failureStore) throws IOException request.indexTemplate( ComposableIndexTemplate.builder() .indexPatterns(List.of(dataStream + "*")) - .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false, failureStore)) - .template(new Template(null, new CompressedXContent(""" + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) + .template(Template.builder().mappings(new CompressedXContent(""" { "dynamic": false, "properties": { @@ -295,7 +296,7 @@ private void putComposableIndexTemplate(boolean failureStore) throws IOException "type": "long" } } - }"""), null)) + }""")).dataStreamOptions(DataStreamTestHelper.createDataStreamOptionsTemplate(failureStore))) .build() ); client().execute(TransportPutComposableIndexTemplateAction.TYPE, request).actionGet(); diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/ResolveClusterDataStreamIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/ResolveClusterDataStreamIT.java index 4c85958498da0..aa6ecf35e06fa 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/ResolveClusterDataStreamIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/ResolveClusterDataStreamIT.java @@ -78,7 +78,7 @@ public class ResolveClusterDataStreamIT extends AbstractMultiClustersTestCase { private static long LATEST_TIMESTAMP = 1691348820000L; @Override - protected Collection remoteClusterAlias() { + protected List remoteClusterAlias() { return List.of(REMOTE_CLUSTER_1, REMOTE_CLUSTER_2); } diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceIT.java index 89c440f5edf8b..19067d85a6805 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceIT.java @@ -40,6 +40,7 @@ import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.cluster.metadata.DataStreamAction; import org.elasticsearch.cluster.metadata.DataStreamLifecycle; +import org.elasticsearch.cluster.metadata.DataStreamTestHelper; import org.elasticsearch.cluster.metadata.IndexMetadata; import 
org.elasticsearch.cluster.metadata.Template; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -1226,9 +1227,10 @@ static void putComposableIndexTemplate( .settings(settings) .mappings(mappings == null ? null : CompressedXContent.fromJSON(mappings)) .lifecycle(lifecycle) + .dataStreamOptions(DataStreamTestHelper.createDataStreamOptionsTemplate(withFailureStore)) ) .metadata(metadata) - .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false, withFailureStore)) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()) .build() ); client().execute(TransportPutComposableIndexTemplateAction.TYPE, request).actionGet(); diff --git a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/DataStreamOptionsIT.java b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/DataStreamOptionsIT.java index 980cc32a12c68..de6b7a682324e 100644 --- a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/DataStreamOptionsIT.java +++ b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/DataStreamOptionsIT.java @@ -11,12 +11,14 @@ import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; import org.junit.Before; import java.io.IOException; import java.util.List; import java.util.Map; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.nullValue; @@ -40,10 +42,14 @@ public void setup() throws IOException { "template": { "settings": { "number_of_replicas": 0 + }, + "data_stream_options": { + "failure_store": { + "enabled": true + } } }, "data_stream": { - "failure_store": true } } """); @@ -59,12 +65,63 @@ public void setup() throws IOException { assertThat(dataStreams.size(), is(1)); Map dataStream = (Map) dataStreams.get(0); assertThat(dataStream.get("name"), equalTo(DATA_STREAM_NAME)); + assertThat(((Map) dataStream.get("failure_store")).get("enabled"), is(true)); List backingIndices = getIndices(dataStream); assertThat(backingIndices.size(), is(1)); List failureStore = getFailureStore(dataStream); assertThat(failureStore.size(), is(1)); } + public void testExplicitlyResetDataStreamOptions() throws IOException { + Request putComponentTemplateRequest = new Request("POST", "/_component_template/with-options"); + putComponentTemplateRequest.setJsonEntity(""" + { + "template": { + "data_stream_options": { + "failure_store": { + "enabled": true + } + } + } + } + """); + assertOK(client().performRequest(putComponentTemplateRequest)); + + Request invalidRequest = new Request("POST", "/_index_template/other-template"); + invalidRequest.setJsonEntity(""" + { + "index_patterns": ["something-else"], + "composed_of" : ["with-options"], + "template": { + "settings": { + "number_of_replicas": 0 + } + } + } + """); + Exception error = expectThrows(ResponseException.class, () -> client().performRequest(invalidRequest)); + assertThat( + error.getMessage(), + containsString("specifies data stream options that can only be used in combination with a data stream") + ); + + // Check that when we nullify the data stream options we can create use any component template in a non data stream template + Request otherRequest = new Request("POST", "/_index_template/other-template"); + otherRequest.setJsonEntity(""" + { + "index_patterns": ["something-else"], + "composed_of" : ["with-options"], + "template": { + "settings": 
{ + "number_of_replicas": 0 + }, + "data_stream_options": null + } + } + """); + assertOK(client().performRequest(otherRequest)); + } + public void testEnableDisableFailureStore() throws IOException { { assertAcknowledged(client().performRequest(new Request("DELETE", "/_data_stream/" + DATA_STREAM_NAME + "/_options"))); diff --git a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/FailureStoreQueryParamIT.java b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/FailureStoreQueryParamIT.java index 20ec26c0c9341..85b914be30b2c 100644 --- a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/FailureStoreQueryParamIT.java +++ b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/FailureStoreQueryParamIT.java @@ -42,10 +42,14 @@ public void setup() throws IOException { "template": { "settings": { "number_of_replicas": 0 + }, + "data_stream_options": { + "failure_store": { + "enabled": true + } } }, "data_stream": { - "failure_store": true } } """); diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/DataStreamsStatsTransportAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/DataStreamsStatsTransportAction.java index 1b0b0aa6abebe..1d3b1b676282a 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/DataStreamsStatsTransportAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/DataStreamsStatsTransportAction.java @@ -31,6 +31,7 @@ import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.engine.ReadOnlyEngine; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.store.StoreStats; import org.elasticsearch.indices.IndicesService; @@ -130,7 +131,7 @@ protected void shardOperation( DataStream dataStream = indexAbstraction.getParentDataStream(); assert dataStream != null; long maxTimestamp = 0L; - try (Engine.Searcher searcher = indexShard.acquireSearcher("data_stream_stats")) { + try (Engine.Searcher searcher = indexShard.acquireSearcher(ReadOnlyEngine.FIELD_RANGE_SEARCH_SOURCE)) { IndexReader indexReader = searcher.getIndexReader(); byte[] maxPackedValue = PointValues.getMaxPackedValue(indexReader, DataStream.TIMESTAMP_FIELD_NAME); if (maxPackedValue != null) { diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/10_basic.yml b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/10_basic.yml index 044ea90fec1af..0d19f555d10a4 100644 --- a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/10_basic.yml +++ b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/10_basic.yml @@ -212,8 +212,12 @@ setup: --- "Create data stream with failure store": - requires: - cluster_features: ["gte_v8.15.0"] - reason: "data stream failure stores default settings changed in 8.15+" + test_runner_features: [ capabilities, allowed_warnings ] + reason: "Data stream failure stores config in templates was added in 8.18+" + capabilities: + - method: POST + path: /_index_template/{template} + capabilities: [ 'failure_store_in_template' ] - do: ingest.put_pipeline: @@ -256,8 +260,7 @@ setup: name: my-template4 body: index_patterns: [ failure-data-stream1, failure-data-stream2 ] - data_stream: - failure_store: true + data_stream: {} template: settings: index: @@ 
-269,6 +272,9 @@ setup: type: date count: type: long + data_stream_options: + failure_store: + enabled: true - do: indices.create_data_stream: @@ -632,8 +638,12 @@ setup: --- "Delete data stream with failure store": - requires: - cluster_features: ["gte_v8.15.0"] - reason: "data stream failure stores REST structure changed in 8.15+" + reason: "Data stream failure stores config in templates was added in 8.18+" + test_runner_features: [ allowed_warnings, capabilities ] + capabilities: + - method: POST + path: /_index_template/{template} + capabilities: [ 'failure_store_in_template' ] - do: allowed_warnings: @@ -642,8 +652,7 @@ setup: name: my-template4 body: index_patterns: [ failure-data-stream1 ] - data_stream: - failure_store: true + data_stream: {} template: mappings: properties: @@ -651,6 +660,9 @@ setup: type: date count: type: long + data_stream_options: + failure_store: + enabled: true - do: indices.create_data_stream: @@ -722,8 +734,12 @@ setup: --- "Delete data stream with failure store uninitialized": - requires: - cluster_features: ["gte_v8.15.0"] - reason: "data stream failure stores REST structure changed in 8.15+" + reason: "Data stream failure stores config in templates was added in 8.18+" + test_runner_features: [ capabilities, allowed_warnings ] + capabilities: + - method: POST + path: /_index_template/{template} + capabilities: [ 'failure_store_in_template' ] - do: allowed_warnings: @@ -732,8 +748,11 @@ setup: name: my-template4 body: index_patterns: [ failure-data-stream1 ] - data_stream: - failure_store: true + data_stream: {} + template: + data_stream_options: + failure_store: + enabled: true - do: indices.create_data_stream: diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/150_tsdb.yml b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/150_tsdb.yml index 3fbf85ab1e702..9ea3bfefabdf8 100644 --- a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/150_tsdb.yml +++ b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/150_tsdb.yml @@ -185,9 +185,12 @@ index without timestamp: --- TSDB failures go to failure store: - requires: - cluster_features: ["data_stream.failure_store.tsdb_fix"] - reason: "tests tsdb failure store fixes in 8.16.0 that catch timestamp errors that happen earlier in the process and redirect them to the failure store." 
- + reason: "Data stream failure stores config in templates was added in 8.18+" + test_runner_features: [ capabilities, allowed_warnings ] + capabilities: + - method: POST + path: /_index_template/{template} + capabilities: [ 'failure_store_in_template' ] - do: allowed_warnings: - "index template [my-template2] has index patterns [fs-k8s*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-template2] will take precedence during new index creation" @@ -195,8 +198,7 @@ TSDB failures go to failure store: name: my-template2 body: index_patterns: [ "fs-k8s*" ] - data_stream: - failure_store: true + data_stream: {} template: settings: index: @@ -207,6 +209,9 @@ TSDB failures go to failure store: time_series: start_time: 2021-04-28T00:00:00Z end_time: 2021-04-29T00:00:00Z + data_stream_options: + failure_store: + enabled: true mappings: properties: "@timestamp": diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/170_modify_data_stream.yml b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/170_modify_data_stream.yml index 3c6d29d939226..13f79e95d99f4 100644 --- a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/170_modify_data_stream.yml +++ b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/170_modify_data_stream.yml @@ -92,9 +92,12 @@ --- "Modify a data stream's failure store": - requires: - cluster_features: ["gte_v8.15.0"] - reason: "data stream failure stores REST structure changed in 8.15+" - test_runner_features: [ "allowed_warnings" ] + reason: "Data stream failure stores config in templates was added in 8.18+" + test_runner_features: [ capabilities, allowed_warnings ] + capabilities: + - method: POST + path: /_index_template/{template} + capabilities: [ 'failure_store_in_template' ] - do: allowed_warnings: @@ -103,8 +106,7 @@ name: my-template body: index_patterns: [data-*] - data_stream: - failure_store: true + data_stream: {} template: mappings: properties: @@ -112,6 +114,9 @@ type: date count: type: long + data_stream_options: + failure_store: + enabled: true - do: indices.create_data_stream: diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/190_failure_store_redirection.yml b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/190_failure_store_redirection.yml index 9b5a9dae8bc0a..2f6b7a0bff34b 100644 --- a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/190_failure_store_redirection.yml +++ b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/190_failure_store_redirection.yml @@ -1,3 +1,15 @@ +setup: + - requires: + reason: "Data stream options was added in 8.18+" + test_runner_features: [ capabilities, allowed_warnings, contains ] + capabilities: + - method: POST + path: /{index}/_doc + capabilities: [ 'failure_store_status' ] + - method: POST + path: /_index_template/{template} + capabilities: [ 'failure_store_in_template' ] + --- teardown: - do: @@ -32,13 +44,6 @@ teardown: --- "Redirect ingest failure in data stream to failure store": - - requires: - reason: "Failure store status was added in 8.16+" - test_runner_features: [capabilities, allowed_warnings, contains] - capabilities: - - method: POST - path: /{index}/_doc - capabilities: [ 'failure_store_status' ] - do: ingest.put_pipeline: id: "failing_pipeline" @@ -78,14 +83,16 @@ teardown: name: generic_logs_template 
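# Aside (summary, not part of this patch): every YAML fixture in these files migrates the same way,
# from the data_stream-level flag to template-level data stream options, i.e. in sketch form:
#   data_stream:                 data_stream: {}
#     failure_store: true   =>   template:
#                                  data_stream_options:
#                                    failure_store:
#                                      enabled: true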
body: index_patterns: logs-* - data_stream: - failure_store: true + data_stream: {} template: settings: number_of_shards: 1 number_of_replicas: 1 index: default_pipeline: "parent_failing_pipeline" + data_stream_options: + failure_store: + enabled: true - do: index: @@ -147,14 +154,6 @@ teardown: --- "Redirect shard failure in data stream to failure store": - - requires: - reason: "Failure store status was added in 8.16+" - test_runner_features: [ capabilities, allowed_warnings, contains ] - capabilities: - - method: POST - path: /{index}/_doc - capabilities: [ 'failure_store_status' ] - - do: allowed_warnings: - "index template [generic_logs_template] has index patterns [logs-*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [generic_logs_template] will take precedence during new index creation" @@ -162,8 +161,7 @@ teardown: name: generic_logs_template body: index_patterns: logs-* - data_stream: - failure_store: true + data_stream: {} template: settings: number_of_shards: 1 @@ -174,7 +172,9 @@ teardown: type: date count: type: long - + data_stream_options: + failure_store: + enabled: true - do: index: @@ -231,13 +231,6 @@ teardown: --- "Ensure failure is redirected to correct failure store after a reroute processor": - - requires: - test_runner_features: [allowed_warnings, capabilities] - reason: "Failure store status was added in 8.16+" - capabilities: - - method: POST - path: /{index}/_doc - capabilities: [ 'failure_store_status' ] - do: ingest.put_pipeline: id: "failing_pipeline" @@ -262,14 +255,16 @@ teardown: name: destination_template body: index_patterns: destination-data-stream - data_stream: - failure_store: true + data_stream: {} template: settings: number_of_shards: 1 number_of_replicas: 1 index: default_pipeline: "failing_pipeline" + data_stream_options: + failure_store: + enabled: true - do: indices.create_data_stream: @@ -331,11 +326,6 @@ teardown: --- "Failure redirects to original failure store during index change if self referenced": - - requires: - cluster_features: [ "gte_v8.15.0" ] - reason: "data stream failure stores REST structure changed in 8.15+" - test_runner_features: [ allowed_warnings, contains ] - - do: ingest.put_pipeline: id: "failing_pipeline" @@ -365,14 +355,16 @@ teardown: name: generic_logs_template body: index_patterns: logs-* - data_stream: - failure_store: true + data_stream: {} template: settings: number_of_shards: 1 number_of_replicas: 1 index: default_pipeline: "failing_pipeline" + data_stream_options: + failure_store: + enabled: true - do: index: @@ -430,14 +422,6 @@ teardown: --- "Failure redirects to original failure store during index change if final pipeline changes target": - - requires: - reason: "Failure store status was added in 8.16+" - test_runner_features: [ capabilities, allowed_warnings, contains ] - capabilities: - - method: POST - path: /{index}/_doc - capabilities: [ 'failure_store_status' ] - - do: ingest.put_pipeline: id: "change_index_pipeline" @@ -462,14 +446,16 @@ teardown: name: generic_logs_template body: index_patterns: logs-* - data_stream: - failure_store: true + data_stream: {} template: settings: number_of_shards: 1 number_of_replicas: 1 index: final_pipeline: "change_index_pipeline" + data_stream_options: + failure_store: + enabled: true - do: index: @@ -526,14 +512,6 @@ teardown: --- "Failure redirects to correct failure store when index loop is detected": - - requires: - reason: "Failure store status was added in 8.16+" - test_runner_features: [ capabilities, 
allowed_warnings, contains ] - capabilities: - - method: POST - path: /{index}/_doc - capabilities: [ 'failure_store_status' ] - - do: ingest.put_pipeline: id: "send_to_destination" @@ -575,14 +553,16 @@ teardown: name: generic_logs_template body: index_patterns: logs-* - data_stream: - failure_store: true + data_stream: {} template: settings: number_of_shards: 1 number_of_replicas: 1 index: default_pipeline: "send_to_destination" + data_stream_options: + failure_store: + enabled: true - do: allowed_warnings: @@ -591,14 +571,16 @@ teardown: name: destination_logs_template body: index_patterns: destination-* - data_stream: - failure_store: true + data_stream: {} template: settings: number_of_shards: 1 number_of_replicas: 1 index: default_pipeline: "send_back_to_original" + data_stream_options: + failure_store: + enabled: true - do: index: @@ -657,14 +639,6 @@ teardown: --- "Failure redirects to correct failure store when pipeline loop is detected": - - requires: - reason: "Failure store status was added in 8.16+" - test_runner_features: [ capabilities, allowed_warnings, contains ] - capabilities: - - method: POST - path: /{index}/_doc - capabilities: [ 'failure_store_status' ] - - do: ingest.put_pipeline: id: "step_1" @@ -706,14 +680,16 @@ teardown: name: generic_logs_template body: index_patterns: logs-* - data_stream: - failure_store: true + data_stream: {} template: settings: number_of_shards: 1 number_of_replicas: 1 index: default_pipeline: "step_1" + data_stream_options: + failure_store: + enabled: true - do: index: @@ -773,9 +749,6 @@ teardown: --- "Version conflicts are not redirected to failure store": - - requires: - test_runner_features: [ allowed_warnings] - - do: allowed_warnings: - "index template [generic_logs_template] has index patterns [logs-*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [generic_logs_template] will take precedence during new index creation" @@ -783,8 +756,7 @@ teardown: name: generic_logs_template body: index_patterns: logs-* - data_stream: - failure_store: true + data_stream: {} template: settings: number_of_shards: 1 @@ -795,6 +767,9 @@ teardown: type: date count: type: long + data_stream_options: + failure_store: + enabled: true - do: bulk: @@ -812,17 +787,6 @@ teardown: --- "Test failure store status with bulk request": - - requires: - test_runner_features: [ allowed_warnings, capabilities ] - reason: "Failure store status was added in 8.16+" - capabilities: - - method: POST - path: /_bulk - capabilities: [ 'failure_store_status' ] - - method: PUT - path: /_bulk - capabilities: [ 'failure_store_status' ] - - do: allowed_warnings: - "index template [generic_logs_template] has index patterns [logs-*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [generic_logs_template] will take precedence during new index creation" @@ -830,8 +794,7 @@ teardown: name: generic_logs_template body: index_patterns: logs-* - data_stream: - failure_store: true + data_stream: {} template: settings: number_of_shards: 1 @@ -842,6 +805,9 @@ teardown: type: date count: type: long + data_stream_options: + failure_store: + enabled: true - do: allowed_warnings: - "index template [no-fs] has index patterns [no-fs*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [no-fs] will take precedence during new index creation" @@ -849,8 +815,7 @@ teardown: name: no-fs body: index_patterns: no-fs* - data_stream: - failure_store: 
false + data_stream: {} template: settings: number_of_shards: 1 @@ -861,6 +826,9 @@ teardown: type: date count: type: long + data_stream_options: + failure_store: + enabled: false - do: diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/200_rollover_failure_store.yml b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/200_rollover_failure_store.yml index 0742435f045fb..cc3a11ffde5e8 100644 --- a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/200_rollover_failure_store.yml +++ b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/200_rollover_failure_store.yml @@ -1,9 +1,15 @@ --- setup: - requires: - cluster_features: ["gte_v8.15.0"] - reason: "data stream failure stores REST structure changed in 8.15+" - test_runner_features: [allowed_warnings, contains, capabilities] + reason: "Data stream failure stores config in templates was added in 8.16+" + test_runner_features: [ capabilities, allowed_warnings ] + capabilities: + - method: POST + path: /_index_template/{template} + capabilities: [ 'failure_store_in_template' ] + - method: POST + path: /{index}/_rollover + capabilities: [ 'lazy-rollover-failure-store' ] - do: allowed_warnings: @@ -12,8 +18,7 @@ setup: name: my-template body: index_patterns: [data-*] - data_stream: - failure_store: true + data_stream: {} template: mappings: properties: @@ -21,6 +26,9 @@ setup: type: date count: type: long + data_stream_options: + failure_store: + enabled: true - do: indices.create_data_stream: @@ -145,14 +153,6 @@ teardown: --- "Lazily roll over a data stream's failure store after a shard failure": - - requires: - reason: "data stream failure store lazy rollover only supported in 8.15+" - test_runner_features: [allowed_warnings, capabilities] - capabilities: - - method: POST - path: /{index}/_rollover - capabilities: [lazy-rollover-failure-store] - # Initialize failure store - do: index: @@ -215,14 +215,6 @@ teardown: --- "Lazily roll over a data stream's failure store after an ingest failure": - - requires: - reason: "data stream failure store lazy rollover only supported in 8.15+" - test_runner_features: [allowed_warnings, capabilities] - capabilities: - - method: POST - path: /{index}/_rollover - capabilities: [lazy-rollover-failure-store] - - do: ingest.put_pipeline: id: "failing_pipeline" @@ -246,12 +238,14 @@ teardown: name: my-template body: index_patterns: [data-*] - data_stream: - failure_store: true + data_stream: {} template: settings: index: default_pipeline: "failing_pipeline" + data_stream_options: + failure_store: + enabled: true - do: indices.create_data_stream: diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/30_auto_create_data_stream.yml b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/30_auto_create_data_stream.yml index 61d17c3d675cf..60500767213af 100644 --- a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/30_auto_create_data_stream.yml +++ b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/30_auto_create_data_stream.yml @@ -50,9 +50,12 @@ --- "Don't initialize failure store during data stream auto-creation on successful index": - requires: - cluster_features: ["gte_v8.15.0"] - reason: "data stream failure stores REST structure changed in 8.15+" - test_runner_features: allowed_warnings + reason: "Data stream failure stores config in templates was added in 8.18+" + 
test_runner_features: [allowed_warnings, capabilities] + capabilities: + - method: POST + path: /_index_template/{template} + capabilities: [ 'failure_store_in_template' ] - do: allowed_warnings: @@ -61,12 +64,14 @@ name: generic_logs_template body: index_patterns: logs-* - data_stream: - failure_store: true + data_stream: {} template: settings: number_of_shards: 1 number_of_replicas: 1 + data_stream_options: + failure_store: + enabled: true - do: index: diff --git a/modules/ingest-attachment/src/main/java/org/elasticsearch/ingest/attachment/AttachmentProcessor.java b/modules/ingest-attachment/src/main/java/org/elasticsearch/ingest/attachment/AttachmentProcessor.java index 007fe39d72e61..83a7bdf7e224a 100644 --- a/modules/ingest-attachment/src/main/java/org/elasticsearch/ingest/attachment/AttachmentProcessor.java +++ b/modules/ingest-attachment/src/main/java/org/elasticsearch/ingest/attachment/AttachmentProcessor.java @@ -18,7 +18,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.logging.DeprecationCategory; import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.core.UpdateForV9; +import org.elasticsearch.core.UpdateForV10; import org.elasticsearch.ingest.AbstractProcessor; import org.elasticsearch.ingest.IngestDocument; import org.elasticsearch.ingest.Processor; @@ -196,7 +196,7 @@ public IngestDocument execute(IngestDocument ingestDocument) { * @param property property to add * @param value value to add */ - private void addAdditionalField(Map additionalFields, Property property, String value) { + private void addAdditionalField(Map additionalFields, Property property, String value) { if (properties.contains(property) && Strings.hasLength(value)) { additionalFields.put(property.toLowerCase(), value); } @@ -233,7 +233,7 @@ public AttachmentProcessor create( String processorTag, String description, Map config - ) throws Exception { + ) { String field = readStringProperty(TYPE, processorTag, config, "field"); String resourceName = readOptionalStringProperty(TYPE, processorTag, config, "resource_name"); String targetField = readStringProperty(TYPE, processorTag, config, "target_field", "attachment"); @@ -241,8 +241,8 @@ public AttachmentProcessor create( int indexedChars = readIntProperty(TYPE, processorTag, config, "indexed_chars", NUMBER_OF_CHARS_INDEXED); boolean ignoreMissing = readBooleanProperty(TYPE, processorTag, config, "ignore_missing", false); String indexedCharsField = readOptionalStringProperty(TYPE, processorTag, config, "indexed_chars_field"); - @UpdateForV9(owner = UpdateForV9.Owner.DATA_MANAGEMENT) - // update the [remove_binary] default to be 'true' assuming enough time has passed. Deprecated in September 2022. 
+ @UpdateForV10(owner = UpdateForV10.Owner.DATA_MANAGEMENT) + // Revisit whether we want to update the [remove_binary] default to be 'true' - would need to find a way to do this safely Boolean removeBinary = readOptionalBooleanProperty(TYPE, processorTag, config, "remove_binary"); if (removeBinary == null) { DEPRECATION_LOGGER.warn( diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpTaskState.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpTaskState.java index c4d0aef0183ed..c128af69009be 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpTaskState.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpTaskState.java @@ -123,7 +123,7 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersions.ENTERPRISE_GEOIP_DOWNLOADER; + return TransportVersions.V_8_16_0; } @Override diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpTaskState.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpTaskState.java index 47ca79e3cb3b9..96525d427d3e8 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpTaskState.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpTaskState.java @@ -44,7 +44,7 @@ public class GeoIpTaskState implements PersistentTaskState, VersionedNamedWriteable { private static boolean includeSha256(TransportVersion version) { - return version.isPatchFrom(TransportVersions.V_8_15_0) || version.onOrAfter(TransportVersions.ENTERPRISE_GEOIP_DOWNLOADER); + return version.onOrAfter(TransportVersions.V_8_15_0); } private static final ParseField DATABASES = new ParseField("databases"); diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpMetadata.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpMetadata.java index b6e73f3f33f7c..a50fe7dee9008 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpMetadata.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpMetadata.java @@ -69,7 +69,7 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersions.ENTERPRISE_GEOIP_DOWNLOADER; + return TransportVersions.V_8_16_0; } public Map getDatabases() { @@ -138,7 +138,7 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersions.ENTERPRISE_GEOIP_DOWNLOADER; + return TransportVersions.V_8_16_0; } } diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/DatabaseConfiguration.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/DatabaseConfiguration.java index a26364f9305e1..aa48c73cf1d73 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/DatabaseConfiguration.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/DatabaseConfiguration.java @@ -138,7 +138,7 @@ public DatabaseConfiguration(StreamInput in) throws IOException { } private static Provider readProvider(StreamInput in) throws IOException { - if (in.getTransportVersion().onOrAfter(TransportVersions.INGEST_GEO_DATABASE_PROVIDERS)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { return 
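// Aside (context, not part of this patch): with 8.16 released, the feature-flag style constants
// ENTERPRISE_GEOIP_DOWNLOADER and INGEST_GEO_DATABASE_PROVIDERS collapse into the released
// TransportVersions.V_8_16_0, so the wire checks in these files take the sketch form:
//   if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { /* new format */ }
// Anything serialized by an older peer still takes the Maxmind-only fallback immediately below.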
in.readNamedWriteable(Provider.class); } else { // prior to the above version, everything was always a maxmind, so this half of the if is logical @@ -154,7 +154,7 @@ public static DatabaseConfiguration parse(XContentParser parser, String id) { public void writeTo(StreamOutput out) throws IOException { out.writeString(id); out.writeString(name); - if (out.getTransportVersion().onOrAfter(TransportVersions.INGEST_GEO_DATABASE_PROVIDERS)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { out.writeNamedWriteable(provider); } else { if (provider instanceof Maxmind maxmind) { diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponse.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponse.java index 93dedb5cb9645..95af247447688 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponse.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponse.java @@ -11,7 +11,6 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Iterators; @@ -131,9 +130,7 @@ public TimeValue getTook() { @Override public void writeTo(StreamOutput out) throws IOException { out.writeArray(items); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_0_0)) { - out.writeVLong(tookInMillis); - } + out.writeVLong(tookInMillis); } @Override diff --git a/modules/lang-painless/src/internalClusterTest/java/org/elasticsearch/painless/action/CrossClusterPainlessExecuteIT.java b/modules/lang-painless/src/internalClusterTest/java/org/elasticsearch/painless/action/CrossClusterPainlessExecuteIT.java index 4669ab25f5d8c..b21cabad9290c 100644 --- a/modules/lang-painless/src/internalClusterTest/java/org/elasticsearch/painless/action/CrossClusterPainlessExecuteIT.java +++ b/modules/lang-painless/src/internalClusterTest/java/org/elasticsearch/painless/action/CrossClusterPainlessExecuteIT.java @@ -54,7 +54,7 @@ public class CrossClusterPainlessExecuteIT extends AbstractMultiClustersTestCase private static final String KEYWORD_FIELD = "my_field"; @Override - protected Collection remoteClusterAlias() { + protected List remoteClusterAlias() { return List.of(REMOTE_CLUSTER); } diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.fields.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.fields.txt index 875b9a1dac3e8..85dba97a392b4 100644 --- a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.fields.txt +++ b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.fields.txt @@ -132,8 +132,8 @@ class org.elasticsearch.script.field.SeqNoDocValuesField @dynamic_type { class org.elasticsearch.script.field.VersionDocValuesField @dynamic_type { } -class org.elasticsearch.script.field.vectors.MultiDenseVector { - MultiDenseVector EMPTY +class org.elasticsearch.script.field.vectors.RankVectors { + RankVectors EMPTY float[] getMagnitudes() Iterator getVectors() @@ -142,9 +142,9 @@ class org.elasticsearch.script.field.vectors.MultiDenseVector { int size() } -class org.elasticsearch.script.field.vectors.MultiDenseVectorDocValuesField { - 
MultiDenseVector get() - MultiDenseVector get(MultiDenseVector) +class org.elasticsearch.script.field.vectors.RankVectorsDocValuesField { + RankVectors get() + RankVectors get(RankVectors) } class org.elasticsearch.script.field.vectors.DenseVector { diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.score.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.score.txt index 5a1d8c002aa17..a5118db4876cb 100644 --- a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.score.txt +++ b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.score.txt @@ -50,7 +50,7 @@ static_import { double cosineSimilarity(org.elasticsearch.script.ScoreScript, Object, String) bound_to org.elasticsearch.script.VectorScoreScriptUtils$CosineSimilarity double dotProduct(org.elasticsearch.script.ScoreScript, Object, String) bound_to org.elasticsearch.script.VectorScoreScriptUtils$DotProduct double hamming(org.elasticsearch.script.ScoreScript, Object, String) bound_to org.elasticsearch.script.VectorScoreScriptUtils$Hamming - double maxSimDotProduct(org.elasticsearch.script.ScoreScript, Object, String) bound_to org.elasticsearch.script.MultiVectorScoreScriptUtils$MaxSimDotProduct - double maxSimInvHamming(org.elasticsearch.script.ScoreScript, Object, String) bound_to org.elasticsearch.script.MultiVectorScoreScriptUtils$MaxSimInvHamming + double maxSimDotProduct(org.elasticsearch.script.ScoreScript, Object, String) bound_to org.elasticsearch.script.RankVectorsScoreScriptUtils$MaxSimDotProduct + double maxSimInvHamming(org.elasticsearch.script.ScoreScript, Object, String) bound_to org.elasticsearch.script.RankVectorsScoreScriptUtils$MaxSimInvHamming } diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.txt index b2db0d1006d40..4815b9c10e733 100644 --- a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.txt +++ b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.txt @@ -123,7 +123,7 @@ class org.elasticsearch.index.mapper.vectors.DenseVectorScriptDocValues { float getMagnitude() } -class org.elasticsearch.index.mapper.vectors.MultiDenseVectorScriptDocValues { +class org.elasticsearch.index.mapper.vectors.RankVectorsScriptDocValues { Iterator getVectorValues() float[] getMagnitudes() } diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/141_multi_dense_vector_max_sim.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/141_rank_vectors_max_sim.yml similarity index 95% rename from modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/141_multi_dense_vector_max_sim.yml rename to modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/141_rank_vectors_max_sim.yml index 77d4b70cdfcae..7c46fbc9a26a5 100644 --- a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/141_multi_dense_vector_max_sim.yml +++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/141_rank_vectors_max_sim.yml @@ -3,9 +3,9 @@ setup: capabilities: - method: POST path: /_search - capabilities: [ multi_dense_vector_script_max_sim_with_bugfix ] + capabilities: [ rank_vectors_script_max_sim_with_bugfix ] 
test_runner_features: capabilities - reason: "Support for multi dense vector max-sim functions capability required" + reason: "Support for rank vectors max-sim functions capability required" - skip: features: headers @@ -18,14 +18,14 @@ setup: mappings: properties: vector: - type: multi_dense_vector + type: rank_vectors dims: 5 byte_vector: - type: multi_dense_vector + type: rank_vectors dims: 5 element_type: byte bit_vector: - type: multi_dense_vector + type: rank_vectors dims: 40 element_type: bit - do: diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/181_multi_dense_vector_dv_fields_api.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/181_rank_vectors_dv_fields_api.yml similarity index 94% rename from modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/181_multi_dense_vector_dv_fields_api.yml rename to modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/181_rank_vectors_dv_fields_api.yml index 66cb3f3c46fcc..f37e554fca7bf 100644 --- a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/181_multi_dense_vector_dv_fields_api.yml +++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/181_rank_vectors_dv_fields_api.yml @@ -3,9 +3,9 @@ setup: capabilities: - method: POST path: /_search - capabilities: [ multi_dense_vector_script_access ] + capabilities: [ rank_vectors_script_access ] test_runner_features: capabilities - reason: "Support for multi dense vector field script access capability required" + reason: "Support for rank vector field script access capability required" - skip: features: headers @@ -18,14 +18,14 @@ setup: mappings: properties: vector: - type: multi_dense_vector + type: rank_vectors dims: 5 byte_vector: - type: multi_dense_vector + type: rank_vectors dims: 5 element_type: byte bit_vector: - type: multi_dense_vector + type: rank_vectors dims: 40 element_type: bit - do: diff --git a/modules/percolator/src/internalClusterTest/java/org/elasticsearch/percolator/PercolatorQuerySearchIT.java b/modules/percolator/src/internalClusterTest/java/org/elasticsearch/percolator/PercolatorQuerySearchIT.java index 05f456b7f2229..8a7f1405f8f4e 100644 --- a/modules/percolator/src/internalClusterTest/java/org/elasticsearch/percolator/PercolatorQuerySearchIT.java +++ b/modules/percolator/src/internalClusterTest/java/org/elasticsearch/percolator/PercolatorQuerySearchIT.java @@ -1359,7 +1359,7 @@ public void testKnnQueryNotSupportedInPercolator() throws IOException { """); indicesAdmin().prepareCreate("index1").setMapping(mappings).get(); ensureGreen(); - QueryBuilder knnVectorQueryBuilder = new KnnVectorQueryBuilder("my_vector", new float[] { 1, 1, 1, 1, 1 }, 10, 10, null); + QueryBuilder knnVectorQueryBuilder = new KnnVectorQueryBuilder("my_vector", new float[] { 1, 1, 1, 1, 1 }, 10, 10, null, null); IndexRequestBuilder indexRequestBuilder = prepareIndex("index1").setId("knn_query1") .setSource(jsonBuilder().startObject().field("my_query", knnVectorQueryBuilder).endObject()); diff --git a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/index/reindex/CrossClusterReindexIT.java b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/index/reindex/CrossClusterReindexIT.java index 8b94337141243..4624393e9fb60 100644 --- a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/index/reindex/CrossClusterReindexIT.java +++ 
b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/index/reindex/CrossClusterReindexIT.java @@ -36,7 +36,7 @@ protected boolean reuseClusters() { } @Override - protected Collection<String> remoteClusterAlias() { + protected List<String> remoteClusterAlias() { return List.of(REMOTE_CLUSTER); } diff --git a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobContainer.java b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobContainer.java index 73936d82fc204..08bdc2051b9e3 100644 --- a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobContainer.java +++ b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobContainer.java @@ -144,7 +144,7 @@ public DeleteResult delete(OperationPurpose purpose) throws IOException { @Override public void deleteBlobsIgnoringIfNotExists(OperationPurpose purpose, Iterator<String> blobNames) throws IOException { - blobStore.deleteBlobsIgnoringIfNotExists(purpose, new Iterator<>() { + blobStore.deleteBlobs(purpose, new Iterator<>() { @Override public boolean hasNext() { return blobNames.hasNext(); diff --git a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java index e4f973fb73a4e..3cac0dc4bb6db 100644 --- a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java +++ b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java @@ -264,8 +264,7 @@ public DeleteResult deleteBlobDirectory(OperationPurpose purpose, String path) t return new DeleteResult(blobsDeleted.get(), bytesDeleted.get()); } - @Override - public void deleteBlobsIgnoringIfNotExists(OperationPurpose purpose, Iterator<String> blobNames) throws IOException { + void deleteBlobs(OperationPurpose purpose, Iterator<String> blobNames) { if (blobNames.hasNext() == false) { return; } diff --git a/modules/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobContainerStatsTests.java b/modules/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobContainerStatsTests.java index f6e97187222e7..8979507230bdd 100644 --- a/modules/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobContainerStatsTests.java +++ b/modules/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobContainerStatsTests.java @@ -72,7 +72,7 @@ public void testRetriesAndOperationsAreTrackedSeparately() throws IOException { false ); case LIST_BLOBS -> blobStore.listBlobsByPrefix(purpose, randomIdentifier(), randomIdentifier()); - case BLOB_BATCH -> blobStore.deleteBlobsIgnoringIfNotExists( + case BLOB_BATCH -> blobStore.deleteBlobs( purpose, List.of(randomIdentifier(), randomIdentifier(), randomIdentifier()).iterator() ); @@ -113,7 +113,7 @@ public void testOperationPurposeIsReflectedInBlobStoreStats() throws IOException os.flush(); }); // BLOB_BATCH - blobStore.deleteBlobsIgnoringIfNotExists(purpose, List.of(randomIdentifier(), randomIdentifier(), randomIdentifier()).iterator()); + blobStore.deleteBlobs(purpose, List.of(randomIdentifier(), randomIdentifier(), randomIdentifier()).iterator()); Map<String, BlobStoreActionStats> stats = blobStore.stats(); String statsMapString = stats.toString(); @@ -148,10 +148,7 @@ public void testOperationPurposeIsNotReflectedInBlobStoreStatsWhenNotServerless( os.flush(); }); // BLOB_BATCH -
blobStore.deleteBlobsIgnoringIfNotExists( - purpose, - List.of(randomIdentifier(), randomIdentifier(), randomIdentifier()).iterator() - ); + blobStore.deleteBlobs(purpose, List.of(randomIdentifier(), randomIdentifier(), randomIdentifier()).iterator()); } Map<String, BlobStoreActionStats> stats = blobStore.stats(); diff --git a/modules/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobContainer.java b/modules/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobContainer.java index 047549cc893ed..edcf03580da09 100644 --- a/modules/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobContainer.java +++ b/modules/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobContainer.java @@ -114,12 +114,12 @@ public void writeBlobAtomic(OperationPurpose purpose, String blobName, BytesRefe @Override public DeleteResult delete(OperationPurpose purpose) throws IOException { - return blobStore.deleteDirectory(purpose, path().buildAsString()); + return blobStore.deleteDirectory(path().buildAsString()); } @Override public void deleteBlobsIgnoringIfNotExists(OperationPurpose purpose, Iterator<String> blobNames) throws IOException { - blobStore.deleteBlobsIgnoringIfNotExists(purpose, new Iterator<>() { + blobStore.deleteBlobs(new Iterator<>() { @Override public boolean hasNext() { return blobNames.hasNext(); diff --git a/modules/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java b/modules/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java index 9cbf64e7e0146..c68217a1a3738 100644 --- a/modules/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java +++ b/modules/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java @@ -29,7 +29,6 @@ import org.elasticsearch.common.blobstore.BlobStore; import org.elasticsearch.common.blobstore.BlobStoreActionStats; import org.elasticsearch.common.blobstore.DeleteResult; -import org.elasticsearch.common.blobstore.OperationPurpose; import org.elasticsearch.common.blobstore.OptionalBytesReference; import org.elasticsearch.common.blobstore.support.BlobContainerUtils; import org.elasticsearch.common.blobstore.support.BlobMetadata; @@ -491,10 +490,9 @@ private void writeBlobMultipart(BlobInfo blobInfo, byte[] buffer, int offset, in /** * Deletes the given path and all its children.
* - * @param purpose The purpose of the delete operation * @param pathStr Name of path to delete */ - DeleteResult deleteDirectory(OperationPurpose purpose, String pathStr) throws IOException { + DeleteResult deleteDirectory(String pathStr) throws IOException { return SocketAccess.doPrivilegedIOException(() -> { DeleteResult deleteResult = DeleteResult.ZERO; Page<Blob> page = client().list(bucketName, BlobListOption.prefix(pathStr)); @@ -502,7 +500,7 @@ DeleteResult deleteDirectory(OperationPurpose purpose, String pathStr) throws IO final AtomicLong blobsDeleted = new AtomicLong(0L); final AtomicLong bytesDeleted = new AtomicLong(0L); final Iterator<Blob> blobs = page.getValues().iterator(); - deleteBlobsIgnoringIfNotExists(purpose, new Iterator<>() { + deleteBlobs(new Iterator<>() { @Override public boolean hasNext() { return blobs.hasNext(); @@ -526,11 +524,9 @@ public String next() { /** * Deletes multiple blobs from the specific bucket using a batch request * - * @param purpose the purpose of the delete operation * @param blobNames names of the blobs to delete */ - @Override - public void deleteBlobsIgnoringIfNotExists(OperationPurpose purpose, Iterator<String> blobNames) throws IOException { + void deleteBlobs(Iterator<String> blobNames) throws IOException { if (blobNames.hasNext() == false) { return; } diff --git a/modules/repository-s3/build.gradle b/modules/repository-s3/build.gradle index 2cfb5d23db4ff..f0dc1ca714958 100644 --- a/modules/repository-s3/build.gradle +++ b/modules/repository-s3/build.gradle @@ -18,15 +18,11 @@ esplugin { classname 'org.elasticsearch.repositories.s3.S3RepositoryPlugin' } -versions << [ - 'aws': '1.12.270' -] - dependencies { - api "com.amazonaws:aws-java-sdk-s3:${versions.aws}" - api "com.amazonaws:aws-java-sdk-core:${versions.aws}" - api "com.amazonaws:aws-java-sdk-sts:${versions.aws}" - api "com.amazonaws:jmespath-java:${versions.aws}" + api "com.amazonaws:aws-java-sdk-s3:${versions.awsv1sdk}" + api "com.amazonaws:aws-java-sdk-core:${versions.awsv1sdk}" + api "com.amazonaws:aws-java-sdk-sts:${versions.awsv1sdk}" + api "com.amazonaws:jmespath-java:${versions.awsv1sdk}" api "org.apache.httpcomponents:httpclient:${versions.httpclient}" api "org.apache.httpcomponents:httpcore:${versions.httpcore}" api "commons-logging:commons-logging:${versions.commonslogging}" diff --git a/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/AbstractRepositoryS3RestTestCase.java b/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/AbstractRepositoryS3RestTestCase.java index 2199a64521759..67ada622efeea 100644 --- a/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/AbstractRepositoryS3RestTestCase.java +++ b/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/AbstractRepositoryS3RestTestCase.java @@ -19,6 +19,7 @@ import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.test.rest.ObjectPath; import java.io.Closeable; import java.io.IOException; @@ -27,7 +28,6 @@ import java.util.function.UnaryOperator; import java.util.stream.Collectors; -import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -152,10 +152,9 @@ private void testNonexistentBucket(Boolean readonly) throws Exception { final var responseException = expectThrows(ResponseException.class, () -> client().performRequest(registerRequest));
assertEquals(RestStatus.INTERNAL_SERVER_ERROR.getStatus(), responseException.getResponse().getStatusLine().getStatusCode()); - assertThat( - responseException.getMessage(), - allOf(containsString("repository_verification_exception"), containsString("is not accessible on master node")) - ); + final var responseObjectPath = ObjectPath.createFromResponse(responseException.getResponse()); + assertThat(responseObjectPath.evaluate("error.type"), equalTo("repository_verification_exception")); + assertThat(responseObjectPath.evaluate("error.reason"), containsString("is not accessible on master node")); } public void testNonexistentClient() throws Exception { @@ -181,15 +180,11 @@ private void testNonexistentClient(Boolean readonly) throws Exception { final var responseException = expectThrows(ResponseException.class, () -> client().performRequest(registerRequest)); assertEquals(RestStatus.INTERNAL_SERVER_ERROR.getStatus(), responseException.getResponse().getStatusLine().getStatusCode()); - assertThat( - responseException.getMessage(), - allOf( - containsString("repository_verification_exception"), - containsString("is not accessible on master node"), - containsString("illegal_argument_exception"), - containsString("Unknown s3 client name") - ) - ); + final var responseObjectPath = ObjectPath.createFromResponse(responseException.getResponse()); + assertThat(responseObjectPath.evaluate("error.type"), equalTo("repository_verification_exception")); + assertThat(responseObjectPath.evaluate("error.reason"), containsString("is not accessible on master node")); + assertThat(responseObjectPath.evaluate("error.caused_by.type"), equalTo("illegal_argument_exception")); + assertThat(responseObjectPath.evaluate("error.caused_by.reason"), containsString("Unknown s3 client name")); } public void testNonexistentSnapshot() throws Exception { @@ -212,7 +207,8 @@ private void testNonexistentSnapshot(Boolean readonly) throws Exception { final var getSnapshotRequest = new Request("GET", "/_snapshot/" + repositoryName + "/" + randomIdentifier()); final var getSnapshotException = expectThrows(ResponseException.class, () -> client().performRequest(getSnapshotRequest)); assertEquals(RestStatus.NOT_FOUND.getStatus(), getSnapshotException.getResponse().getStatusLine().getStatusCode()); - assertThat(getSnapshotException.getMessage(), containsString("snapshot_missing_exception")); + final var getResponseObjectPath = ObjectPath.createFromResponse(getSnapshotException.getResponse()); + assertThat(getResponseObjectPath.evaluate("error.type"), equalTo("snapshot_missing_exception")); final var restoreRequest = new Request("POST", "/_snapshot/" + repositoryName + "/" + randomIdentifier() + "/_restore"); if (randomBoolean()) { @@ -220,13 +216,15 @@ private void testNonexistentSnapshot(Boolean readonly) throws Exception { } final var restoreException = expectThrows(ResponseException.class, () -> client().performRequest(restoreRequest)); assertEquals(RestStatus.INTERNAL_SERVER_ERROR.getStatus(), restoreException.getResponse().getStatusLine().getStatusCode()); - assertThat(restoreException.getMessage(), containsString("snapshot_restore_exception")); + final var restoreResponseObjectPath = ObjectPath.createFromResponse(restoreException.getResponse()); + assertThat(restoreResponseObjectPath.evaluate("error.type"), equalTo("snapshot_restore_exception")); if (readonly != Boolean.TRUE) { final var deleteRequest = new Request("DELETE", "/_snapshot/" + repositoryName + "/" + randomIdentifier()); final var deleteException = 
expectThrows(ResponseException.class, () -> client().performRequest(deleteRequest)); assertEquals(RestStatus.NOT_FOUND.getStatus(), deleteException.getResponse().getStatusLine().getStatusCode()); - assertThat(deleteException.getMessage(), containsString("snapshot_missing_exception")); + final var deleteResponseObjectPath = ObjectPath.createFromResponse(deleteException.getResponse()); + assertThat(deleteResponseObjectPath.evaluate("error.type"), equalTo("snapshot_missing_exception")); } } } diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java index e13cc40dd3e0f..bf693222a4b72 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java @@ -342,10 +342,10 @@ public DeleteResult delete(OperationPurpose purpose) throws IOException { return summary.getKey(); }); if (list.isTruncated()) { - blobStore.deleteBlobsIgnoringIfNotExists(purpose, blobNameIterator); + blobStore.deleteBlobs(purpose, blobNameIterator); prevListing = list; } else { - blobStore.deleteBlobsIgnoringIfNotExists(purpose, Iterators.concat(blobNameIterator, Iterators.single(keyPath))); + blobStore.deleteBlobs(purpose, Iterators.concat(blobNameIterator, Iterators.single(keyPath))); break; } } @@ -357,7 +357,7 @@ public DeleteResult delete(OperationPurpose purpose) throws IOException { @Override public void deleteBlobsIgnoringIfNotExists(OperationPurpose purpose, Iterator<String> blobNames) throws IOException { - blobStore.deleteBlobsIgnoringIfNotExists(purpose, Iterators.map(blobNames, this::buildKey)); + blobStore.deleteBlobs(purpose, Iterators.map(blobNames, this::buildKey)); } @Override @@ -1025,7 +1025,7 @@ public void onResponse(Void unused) { // should be no other processes interacting with the repository.
logger.warn( Strings.format( - "failed to clean up multipart upload [{}] of blob [{}][{}][{}]", + "failed to clean up multipart upload [%s] of blob [%s][%s][%s]", abortMultipartUploadRequest.getUploadId(), blobStore.getRepositoryMetadata().name(), abortMultipartUploadRequest.getBucketName(), diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java index 4f2b0f213e448..4bd54aa37077f 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java @@ -340,8 +340,7 @@ public BlobContainer blobContainer(BlobPath path) { return new S3BlobContainer(path, this); } - @Override - public void deleteBlobsIgnoringIfNotExists(OperationPurpose purpose, Iterator<String> blobNames) throws IOException { + void deleteBlobs(OperationPurpose purpose, Iterator<String> blobNames) throws IOException { if (blobNames.hasNext() == false) { return; } diff --git a/modules/repository-url/src/main/java/org/elasticsearch/common/blobstore/url/URLBlobStore.java b/modules/repository-url/src/main/java/org/elasticsearch/common/blobstore/url/URLBlobStore.java index 8538d2ba673bc..0e9c735b22fd6 100644 --- a/modules/repository-url/src/main/java/org/elasticsearch/common/blobstore/url/URLBlobStore.java +++ b/modules/repository-url/src/main/java/org/elasticsearch/common/blobstore/url/URLBlobStore.java @@ -13,7 +13,6 @@ import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.BlobStore; import org.elasticsearch.common.blobstore.BlobStoreException; -import org.elasticsearch.common.blobstore.OperationPurpose; import org.elasticsearch.common.blobstore.url.http.HttpURLBlobContainer; import org.elasticsearch.common.blobstore.url.http.URLHttpClient; import org.elasticsearch.common.blobstore.url.http.URLHttpClientSettings; @@ -23,10 +22,8 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.core.CheckedFunction; -import java.io.IOException; import java.net.MalformedURLException; import java.net.URL; -import java.util.Iterator; import java.util.List; /** @@ -109,11 +106,6 @@ public BlobContainer blobContainer(BlobPath blobPath) { } } - @Override - public void deleteBlobsIgnoringIfNotExists(OperationPurpose purpose, Iterator<String> blobNames) throws IOException { - throw new UnsupportedOperationException("Bulk deletes are not supported in URL repositories"); - } - @Override public void close() { // nothing to do here...
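A note on the S3BlobContainer placeholder fix above: `{}` placeholders are only substituted when a parameterized message is handed directly to the logger, while a string built eagerly with a `String.format`-style helper (as `Strings.format` is here) needs `%s` conversions, or the braces are emitted verbatim. A minimal sketch of the difference, using plain `String.format` and the Log4j 2 API as stand-ins for the Elasticsearch helpers:

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

public class PlaceholderDemo {
    private static final Logger logger = LogManager.getLogger(PlaceholderDemo.class);

    public static void main(String[] args) {
        String uploadId = "upload-1"; // hypothetical value for illustration
        // Parameterized logging: the logger itself substitutes {} with the argument.
        logger.warn("failed to clean up multipart upload [{}]", uploadId);
        // Eager formatting: String.format knows nothing about {}; it requires %s,
        // which is exactly why the literal above was changed from [{}] to [%s].
        logger.warn(String.format("failed to clean up multipart upload [%s]", uploadId));
    }
}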
diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerTransportTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerTransportTests.java index 3fd5cc44a3403..1d39b993cef92 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerTransportTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerTransportTests.java @@ -40,6 +40,7 @@ import io.netty.handler.codec.http.HttpUtil; import io.netty.handler.codec.http.HttpVersion; +import org.apache.http.ConnectionClosedException; import org.apache.http.HttpHost; import org.apache.lucene.util.SetOnce; import org.elasticsearch.ElasticsearchException; @@ -48,6 +49,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.bulk.IncrementalBulkService; import org.elasticsearch.action.support.ActionTestUtils; +import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.client.Request; import org.elasticsearch.client.RestClient; @@ -100,6 +102,7 @@ import java.util.Collections; import java.util.List; import java.util.Set; +import java.util.concurrent.CancellationException; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; @@ -110,6 +113,7 @@ import static com.carrotsearch.randomizedtesting.RandomizedTest.getRandom; import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_ORIGIN; import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ENABLED; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_SERVER_SHUTDOWN_GRACE_PERIOD; import static org.elasticsearch.rest.RestStatus.BAD_REQUEST; import static org.elasticsearch.rest.RestStatus.OK; import static org.elasticsearch.rest.RestStatus.UNAUTHORIZED; @@ -1039,8 +1043,16 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th } } - public void testRespondAfterClose() throws Exception { - final String url = "/thing"; + public void testRespondAfterServiceCloseWithClientCancel() throws Exception { + runRespondAfterServiceCloseTest(true); + } + + public void testRespondAfterServiceCloseWithServerCancel() throws Exception { + runRespondAfterServiceCloseTest(false); + } + + private void runRespondAfterServiceCloseTest(boolean clientCancel) throws Exception { + final String url = "/" + randomIdentifier(); final CountDownLatch responseReleasedLatch = new CountDownLatch(1); final SubscribableListener<Void> transportClosedFuture = new SubscribableListener<>(); final CountDownLatch handlingRequestLatch = new CountDownLatch(1); @@ -1066,7 +1078,9 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th try ( Netty4HttpServerTransport transport = new Netty4HttpServerTransport( - Settings.EMPTY, + clientCancel + ?
Settings.EMPTY + : Settings.builder().put(SETTING_HTTP_SERVER_SHUTDOWN_GRACE_PERIOD.getKey(), TimeValue.timeValueMillis(1)).build(), networkService, threadPool, xContentRegistry(), @@ -1082,11 +1096,24 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th transport.start(); final var address = randomFrom(transport.boundAddress().boundAddresses()).address(); try (var client = RestClient.builder(new HttpHost(address.getAddress(), address.getPort())).build()) { - client.performRequestAsync(new Request("GET", url), ActionTestUtils.wrapAsRestResponseListener(ActionListener.noop())); + final var responseExceptionFuture = new PlainActionFuture<Exception>(); + final var cancellable = client.performRequestAsync( + new Request("GET", url), + ActionTestUtils.wrapAsRestResponseListener(ActionTestUtils.assertNoSuccessListener(responseExceptionFuture::onResponse)) + ); safeAwait(handlingRequestLatch); + if (clientCancel) { + threadPool.generic().execute(cancellable::cancel); + } transport.close(); transportClosedFuture.onResponse(null); safeAwait(responseReleasedLatch); + final var responseException = safeGet(responseExceptionFuture); + if (clientCancel) { + assertThat(responseException, instanceOf(CancellationException.class)); + } else { + assertThat(responseException, instanceOf(ConnectionClosedException.class)); + } } } } diff --git a/muted-tests.yml b/muted-tests.yml index b2f5b08319ff7..c0e3c217abce2 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -2,12 +2,6 @@ tests: - class: "org.elasticsearch.client.RestClientSingleHostIntegTests" issue: "https://github.com/elastic/elasticsearch/issues/102717" method: "testRequestResetAndAbort" -- class: org.elasticsearch.xpack.restart.FullClusterRestartIT - method: testSingleDoc {cluster=UPGRADED} - issue: https://github.com/elastic/elasticsearch/issues/111434 -- class: org.elasticsearch.xpack.restart.FullClusterRestartIT - method: testDataStreams {cluster=UPGRADED} - issue: https://github.com/elastic/elasticsearch/issues/111448 - class: org.elasticsearch.smoketest.WatcherYamlRestIT method: test {p0=watcher/usage/10_basic/Test watcher usage stats output} issue: https://github.com/elastic/elasticsearch/issues/112189 @@ -100,15 +94,9 @@ tests: - class: org.elasticsearch.xpack.test.rest.XPackRestIT method: test {p0=transform/transforms_start_stop/Verify start transform reuses destination index} issue: https://github.com/elastic/elasticsearch/issues/115808 -- class: org.elasticsearch.smoketest.DocsClientYamlTestSuiteIT - method: test {yaml=reference/watcher/example-watches/example-watch-clusterstatus/line_137} - issue: https://github.com/elastic/elasticsearch/issues/115809 - class: org.elasticsearch.search.StressSearchServiceReaperIT method: testStressReaper issue: https://github.com/elastic/elasticsearch/issues/115816 -- class: org.elasticsearch.search.SearchServiceTests - method: testParseSourceValidation - issue: https://github.com/elastic/elasticsearch/issues/115936 - class: org.elasticsearch.xpack.application.connector.ConnectorIndexServiceTests issue: https://github.com/elastic/elasticsearch/issues/116087 - class: org.elasticsearch.backwards.MixedClusterClientYamlTestSuiteIT @@ -123,18 +111,12 @@ tests: - class: org.elasticsearch.action.search.SearchPhaseControllerTests method: testProgressListener issue: https://github.com/elastic/elasticsearch/issues/116149 -- class: org.elasticsearch.xpack.test.rest.XPackRestIT - method: test {p0=terms_enum/10_basic/Test security} - issue: https://github.com/elastic/elasticsearch/issues/116178 -
class: org.elasticsearch.search.basic.SearchWithRandomDisconnectsIT method: testSearchWithRandomDisconnects issue: https://github.com/elastic/elasticsearch/issues/116175 - class: org.elasticsearch.xpack.deprecation.DeprecationHttpIT method: testDeprecatedSettingsReturnWarnings issue: https://github.com/elastic/elasticsearch/issues/108628 -- class: org.elasticsearch.action.search.SearchQueryThenFetchAsyncActionTests - method: testBottomFieldSort - issue: https://github.com/elastic/elasticsearch/issues/116249 - class: org.elasticsearch.xpack.shutdown.NodeShutdownIT method: testAllocationPreventedForRemoval issue: https://github.com/elastic/elasticsearch/issues/116363 @@ -144,9 +126,6 @@ tests: - class: org.elasticsearch.reservedstate.service.FileSettingsServiceTests method: testInvalidJSON issue: https://github.com/elastic/elasticsearch/issues/116521 -- class: org.elasticsearch.xpack.searchablesnapshots.SearchableSnapshotsCanMatchOnCoordinatorIntegTests - method: testSearchableSnapshotShardsAreSkippedBySearchRequestWithoutQueryingAnyNodeWhenTheyAreOutsideOfTheQueryRange - issue: https://github.com/elastic/elasticsearch/issues/116523 - class: org.elasticsearch.reservedstate.service.RepositoriesFileSettingsIT method: testSettingsApplied issue: https://github.com/elastic/elasticsearch/issues/116694 @@ -178,9 +157,6 @@ tests: - class: org.elasticsearch.xpack.test.rest.XPackRestIT method: test {p0=snapshot/10_basic/Create a source only snapshot and then restore it} issue: https://github.com/elastic/elasticsearch/issues/117295 -- class: org.elasticsearch.xpack.searchablesnapshots.RetrySearchIntegTests - method: testRetryPointInTime - issue: https://github.com/elastic/elasticsearch/issues/117116 - class: org.elasticsearch.xpack.inference.DefaultEndPointsIT method: testInferDeploysDefaultElser issue: https://github.com/elastic/elasticsearch/issues/114913 @@ -210,9 +186,6 @@ tests: - class: "org.elasticsearch.xpack.esql.qa.mixed.MixedClusterEsqlSpecIT" method: "test {scoring.*}" issue: https://github.com/elastic/elasticsearch/issues/117641 -- class: org.elasticsearch.xpack.inference.InferenceCrudIT - method: testSupportedStream - issue: https://github.com/elastic/elasticsearch/issues/117745 - class: org.elasticsearch.xpack.esql.ccq.MultiClusterSpecIT method: test {scoring.QstrWithFieldAndScoringSortedEval} issue: https://github.com/elastic/elasticsearch/issues/117751 @@ -224,11 +197,6 @@ tests: issue: https://github.com/elastic/elasticsearch/issues/117815 - class: org.elasticsearch.xpack.ml.integration.DatafeedJobsRestIT issue: https://github.com/elastic/elasticsearch/issues/111319 -- class: org.elasticsearch.validation.DotPrefixClientYamlTestSuiteIT - issue: https://github.com/elastic/elasticsearch/issues/117893 -- class: org.elasticsearch.xpack.core.ml.search.SparseVectorQueryBuilderTests - method: testToQuery - issue: https://github.com/elastic/elasticsearch/issues/117904 - class: org.elasticsearch.packaging.test.ArchiveGenerateInitialCredentialsTests method: test20NoAutoGenerationWhenAutoConfigurationDisabled issue: https://github.com/elastic/elasticsearch/issues/117891 @@ -238,15 +206,123 @@ tests: - class: org.elasticsearch.xpack.esql.plugin.ClusterRequestTests method: testFallbackIndicesOptions issue: https://github.com/elastic/elasticsearch/issues/117937 -- class: org.elasticsearch.xpack.esql.qa.single_node.RequestIndexFilteringIT - method: testFieldExistsFilter_KeepWildcard - issue: https://github.com/elastic/elasticsearch/issues/117935 -- class: 
org.elasticsearch.xpack.esql.qa.multi_node.RequestIndexFilteringIT - method: testFieldExistsFilter_KeepWildcard - issue: https://github.com/elastic/elasticsearch/issues/117935 - class: org.elasticsearch.xpack.ml.integration.RegressionIT method: testTwoJobsWithSameRandomizeSeedUseSameTrainingSet issue: https://github.com/elastic/elasticsearch/issues/117805 +- class: org.elasticsearch.packaging.test.ArchiveGenerateInitialCredentialsTests + method: test30NoAutogenerationWhenDaemonized + issue: https://github.com/elastic/elasticsearch/issues/117956 +- class: org.elasticsearch.packaging.test.CertGenCliTests + method: test40RunWithCert + issue: https://github.com/elastic/elasticsearch/issues/117955 +- class: org.elasticsearch.upgrades.QueryBuilderBWCIT + method: testQueryBuilderBWC {cluster=UPGRADED} + issue: https://github.com/elastic/elasticsearch/issues/116990 +- class: org.elasticsearch.xpack.restart.QueryBuilderBWCIT + method: testQueryBuilderBWC {p0=UPGRADED} + issue: https://github.com/elastic/elasticsearch/issues/116989 +- class: org.elasticsearch.index.reindex.ReindexNodeShutdownIT + method: testReindexWithShutdown + issue: https://github.com/elastic/elasticsearch/issues/118040 +- class: org.elasticsearch.packaging.test.ConfigurationTests + method: test20HostnameSubstitution + issue: https://github.com/elastic/elasticsearch/issues/118028 +- class: org.elasticsearch.packaging.test.ArchiveTests + method: test40AutoconfigurationNotTriggeredWhenNodeIsMeantToJoinExistingCluster + issue: https://github.com/elastic/elasticsearch/issues/118029 +- class: org.elasticsearch.packaging.test.ConfigurationTests + method: test30SymlinkedDataPath + issue: https://github.com/elastic/elasticsearch/issues/118111 +- class: org.elasticsearch.packaging.test.KeystoreManagementTests + method: test30KeystorePasswordFromFile + issue: https://github.com/elastic/elasticsearch/issues/118123 +- class: org.elasticsearch.packaging.test.KeystoreManagementTests + method: test31WrongKeystorePasswordFromFile + issue: https://github.com/elastic/elasticsearch/issues/118123 +- class: org.elasticsearch.packaging.test.ArchiveTests + method: test41AutoconfigurationNotTriggeredWhenNodeCannotContainData + issue: https://github.com/elastic/elasticsearch/issues/118110 +- class: org.elasticsearch.xpack.remotecluster.CrossClusterEsqlRCS2UnavailableRemotesIT + method: testEsqlRcs2UnavailableRemoteScenarios + issue: https://github.com/elastic/elasticsearch/issues/117419 +- class: org.elasticsearch.packaging.test.DebPreservationTests + method: test40RestartOnUpgrade + issue: https://github.com/elastic/elasticsearch/issues/118170 +- class: org.elasticsearch.xpack.inference.DefaultEndPointsIT + method: testInferDeploysDefaultRerank + issue: https://github.com/elastic/elasticsearch/issues/118184 +- class: org.elasticsearch.xpack.esql.action.EsqlActionTaskIT + method: testCancelRequestWhenFailingFetchingPages + issue: https://github.com/elastic/elasticsearch/issues/118193 +- class: org.elasticsearch.packaging.test.MemoryLockingTests + method: test20MemoryLockingEnabled + issue: https://github.com/elastic/elasticsearch/issues/118195 +- class: org.elasticsearch.packaging.test.ArchiveTests + method: test42AutoconfigurationNotTriggeredWhenNodeCannotBecomeMaster + issue: https://github.com/elastic/elasticsearch/issues/118196 +- class: org.elasticsearch.packaging.test.ArchiveTests + method: test43AutoconfigurationNotTriggeredWhenTlsAlreadyConfigured + issue: https://github.com/elastic/elasticsearch/issues/118202 +- class: 
org.elasticsearch.packaging.test.ArchiveTests + method: test44AutoConfigurationNotTriggeredOnNotWriteableConfDir + issue: https://github.com/elastic/elasticsearch/issues/118208 +- class: org.elasticsearch.packaging.test.ArchiveTests + method: test51AutoConfigurationWithPasswordProtectedKeystore + issue: https://github.com/elastic/elasticsearch/issues/118212 +- class: org.elasticsearch.ingest.common.IngestCommonClientYamlTestSuiteIT + issue: https://github.com/elastic/elasticsearch/issues/118215 +- class: org.elasticsearch.datastreams.DataStreamsClientYamlTestSuiteIT + method: test {p0=data_stream/120_data_streams_stats/Multiple data stream} + issue: https://github.com/elastic/elasticsearch/issues/118217 +- class: org.elasticsearch.validation.DotPrefixClientYamlTestSuiteIT + issue: https://github.com/elastic/elasticsearch/issues/118224 +- class: org.elasticsearch.packaging.test.ArchiveTests + method: test60StartAndStop + issue: https://github.com/elastic/elasticsearch/issues/118216 +- class: org.elasticsearch.action.search.SearchQueryThenFetchAsyncActionTests + method: testBottomFieldSort + issue: https://github.com/elastic/elasticsearch/issues/118214 +- class: org.elasticsearch.xpack.esql.action.CrossClustersEnrichIT + method: testTopNThenEnrichRemote + issue: https://github.com/elastic/elasticsearch/issues/118307 +- class: org.elasticsearch.xpack.remotecluster.CrossClusterEsqlRCS1UnavailableRemotesIT + method: testEsqlRcs1UnavailableRemoteScenarios + issue: https://github.com/elastic/elasticsearch/issues/118350 +- class: org.elasticsearch.xpack.searchablesnapshots.RetrySearchIntegTests + method: testSearcherId + issue: https://github.com/elastic/elasticsearch/issues/118374 +- class: org.elasticsearch.docker.test.DockerYmlTestSuiteIT + method: test {p0=/10_info/Info} + issue: https://github.com/elastic/elasticsearch/issues/118394 +- class: org.elasticsearch.docker.test.DockerYmlTestSuiteIT + method: test {p0=/11_nodes/Additional disk information} + issue: https://github.com/elastic/elasticsearch/issues/118395 +- class: org.elasticsearch.docker.test.DockerYmlTestSuiteIT + method: test {p0=/11_nodes/Test cat nodes output with full_id set} + issue: https://github.com/elastic/elasticsearch/issues/118396 +- class: org.elasticsearch.docker.test.DockerYmlTestSuiteIT + method: test {p0=/11_nodes/Test cat nodes output} + issue: https://github.com/elastic/elasticsearch/issues/118397 +- class: org.elasticsearch.xpack.test.rest.XPackRestIT + method: test {p0=migrate/20_reindex_status/Test get reindex status with nonexistent task id} + issue: https://github.com/elastic/elasticsearch/issues/118401 +- class: org.elasticsearch.xpack.test.rest.XPackRestIT + method: test {p0=migrate/10_reindex/Test Reindex With Nonexistent Data Stream} + issue: https://github.com/elastic/elasticsearch/issues/118274 +- class: org.elasticsearch.xpack.test.rest.XPackRestIT + method: test {p0=migrate/10_reindex/Test Reindex With Bad Data Stream Name} + issue: https://github.com/elastic/elasticsearch/issues/118272 +- class: org.elasticsearch.xpack.test.rest.XPackRestIT + method: test {p0=migrate/10_reindex/Test Reindex With Unsupported Mode} + issue: https://github.com/elastic/elasticsearch/issues/118273 +- class: org.elasticsearch.xpack.inference.InferenceCrudIT + method: testUnifiedCompletionInference + issue: https://github.com/elastic/elasticsearch/issues/118405 +- class: org.elasticsearch.xpack.security.operator.OperatorPrivilegesIT + method: testEveryActionIsEitherOperatorOnlyOrNonOperator + issue: 
https://github.com/elastic/elasticsearch/issues/118220 +- class: org.elasticsearch.xpack.esql.action.EsqlActionBreakerIT + issue: https://github.com/elastic/elasticsearch/issues/118238 # Examples: # diff --git a/plugins/analysis-icu/src/main/java/org/elasticsearch/plugin/analysis/icu/IcuAnalyzerProvider.java b/plugins/analysis-icu/src/main/java/org/elasticsearch/plugin/analysis/icu/IcuAnalyzerProvider.java index 3fea1918252ea..9fb611345dbea 100644 --- a/plugins/analysis-icu/src/main/java/org/elasticsearch/plugin/analysis/icu/IcuAnalyzerProvider.java +++ b/plugins/analysis-icu/src/main/java/org/elasticsearch/plugin/analysis/icu/IcuAnalyzerProvider.java @@ -28,7 +28,7 @@ public class IcuAnalyzerProvider extends AbstractIndexAnalyzerProvider<Analyzer> private final Normalizer2 normalizer; public IcuAnalyzerProvider(IndexSettings indexSettings, Environment environment, String name, Settings settings) { - super(name, settings); + super(name); String method = settings.get("method", "nfkc_cf"); String mode = settings.get("mode", "compose"); if ("compose".equals(mode) == false && "decompose".equals(mode) == false) { diff --git a/plugins/analysis-icu/src/main/java/org/elasticsearch/plugin/analysis/icu/IcuCollationTokenFilterFactory.java b/plugins/analysis-icu/src/main/java/org/elasticsearch/plugin/analysis/icu/IcuCollationTokenFilterFactory.java index ae8ead523b7ea..fe0b3a00b2bbb 100644 --- a/plugins/analysis-icu/src/main/java/org/elasticsearch/plugin/analysis/icu/IcuCollationTokenFilterFactory.java +++ b/plugins/analysis-icu/src/main/java/org/elasticsearch/plugin/analysis/icu/IcuCollationTokenFilterFactory.java @@ -44,7 +44,7 @@ public class IcuCollationTokenFilterFactory extends AbstractTokenFilterFactory { @SuppressWarnings("HiddenField") public IcuCollationTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { - super(name, settings); + super(name); Collator collator; String rules = settings.get("rules"); diff --git a/plugins/analysis-icu/src/main/java/org/elasticsearch/plugin/analysis/icu/IcuFoldingTokenFilterFactory.java b/plugins/analysis-icu/src/main/java/org/elasticsearch/plugin/analysis/icu/IcuFoldingTokenFilterFactory.java index 6cffbb7e0a17e..8932518dc5436 100644 --- a/plugins/analysis-icu/src/main/java/org/elasticsearch/plugin/analysis/icu/IcuFoldingTokenFilterFactory.java +++ b/plugins/analysis-icu/src/main/java/org/elasticsearch/plugin/analysis/icu/IcuFoldingTokenFilterFactory.java @@ -39,7 +39,7 @@ public class IcuFoldingTokenFilterFactory extends AbstractTokenFilterFactory imp private final Normalizer2 normalizer; public IcuFoldingTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { - super(name, settings); + super(name); this.normalizer = IcuNormalizerTokenFilterFactory.wrapWithUnicodeSetFilter(ICU_FOLDING_NORMALIZER, settings); } diff --git a/plugins/analysis-icu/src/main/java/org/elasticsearch/plugin/analysis/icu/IcuNormalizerTokenFilterFactory.java b/plugins/analysis-icu/src/main/java/org/elasticsearch/plugin/analysis/icu/IcuNormalizerTokenFilterFactory.java index 23b2c355b7a68..c9eceef30f62e 100644 --- a/plugins/analysis-icu/src/main/java/org/elasticsearch/plugin/analysis/icu/IcuNormalizerTokenFilterFactory.java +++ b/plugins/analysis-icu/src/main/java/org/elasticsearch/plugin/analysis/icu/IcuNormalizerTokenFilterFactory.java @@ -30,7 +30,7 @@ public class IcuNormalizerTokenFilterFactory extends AbstractTokenFilterFactory private final Normalizer2 normalizer; public
IcuNormalizerTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { - super(name, settings); + super(name); String method = settings.get("name", "nfkc_cf"); Normalizer2 normalizerInstance = Normalizer2.getInstance(null, method, Normalizer2.Mode.COMPOSE); this.normalizer = wrapWithUnicodeSetFilter(normalizerInstance, settings); diff --git a/plugins/analysis-icu/src/main/java/org/elasticsearch/plugin/analysis/icu/IcuTokenizerFactory.java b/plugins/analysis-icu/src/main/java/org/elasticsearch/plugin/analysis/icu/IcuTokenizerFactory.java index 62ab6d8792905..c66d25ffa2f3b 100644 --- a/plugins/analysis-icu/src/main/java/org/elasticsearch/plugin/analysis/icu/IcuTokenizerFactory.java +++ b/plugins/analysis-icu/src/main/java/org/elasticsearch/plugin/analysis/icu/IcuTokenizerFactory.java @@ -39,7 +39,7 @@ public class IcuTokenizerFactory extends AbstractTokenizerFactory { private static final String RULE_FILES = "rule_files"; public IcuTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { - super(indexSettings, settings, name); + super(name); config = getIcuConfig(environment, settings); } diff --git a/plugins/analysis-icu/src/main/java/org/elasticsearch/plugin/analysis/icu/IcuTransformTokenFilterFactory.java b/plugins/analysis-icu/src/main/java/org/elasticsearch/plugin/analysis/icu/IcuTransformTokenFilterFactory.java index 785b083c4c31e..5a0a0b3897a47 100644 --- a/plugins/analysis-icu/src/main/java/org/elasticsearch/plugin/analysis/icu/IcuTransformTokenFilterFactory.java +++ b/plugins/analysis-icu/src/main/java/org/elasticsearch/plugin/analysis/icu/IcuTransformTokenFilterFactory.java @@ -26,7 +26,7 @@ public class IcuTransformTokenFilterFactory extends AbstractTokenFilterFactory i private final Transliterator transliterator; public IcuTransformTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { - super(name, settings); + super(name); this.id = settings.get("id", "Null"); String s = settings.get("dir", "forward"); this.dir = "forward".equals(s) ? 
Transliterator.FORWARD : Transliterator.REVERSE; diff --git a/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/plugin/analysis/kuromoji/HiraganaUppercaseFilterFactory.java b/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/plugin/analysis/kuromoji/HiraganaUppercaseFilterFactory.java index b22757af22372..2d761d99e742f 100644 --- a/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/plugin/analysis/kuromoji/HiraganaUppercaseFilterFactory.java +++ b/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/plugin/analysis/kuromoji/HiraganaUppercaseFilterFactory.java @@ -18,7 +18,7 @@ public class HiraganaUppercaseFilterFactory extends AbstractTokenFilterFactory { public HiraganaUppercaseFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { - super(name, settings); + super(name); } @Override diff --git a/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/plugin/analysis/kuromoji/JapaneseStopTokenFilterFactory.java b/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/plugin/analysis/kuromoji/JapaneseStopTokenFilterFactory.java index e34d5246dd092..3a26e647092f7 100644 --- a/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/plugin/analysis/kuromoji/JapaneseStopTokenFilterFactory.java +++ b/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/plugin/analysis/kuromoji/JapaneseStopTokenFilterFactory.java @@ -35,7 +35,7 @@ public class JapaneseStopTokenFilterFactory extends AbstractTokenFilterFactory { private final boolean removeTrailing; public JapaneseStopTokenFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) { - super(name, settings); + super(name); this.ignoreCase = settings.getAsBoolean("ignore_case", false); this.removeTrailing = settings.getAsBoolean("remove_trailing", true); this.stopWords = Analysis.parseWords( diff --git a/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/plugin/analysis/kuromoji/KatakanaUppercaseFilterFactory.java b/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/plugin/analysis/kuromoji/KatakanaUppercaseFilterFactory.java index 1f72f1d57d2c5..0776c4ca970a9 100644 --- a/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/plugin/analysis/kuromoji/KatakanaUppercaseFilterFactory.java +++ b/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/plugin/analysis/kuromoji/KatakanaUppercaseFilterFactory.java @@ -18,7 +18,7 @@ public class KatakanaUppercaseFilterFactory extends AbstractTokenFilterFactory { public KatakanaUppercaseFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { - super(name, settings); + super(name); } @Override diff --git a/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/plugin/analysis/kuromoji/KuromojiAnalyzerProvider.java b/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/plugin/analysis/kuromoji/KuromojiAnalyzerProvider.java index db336bec997c4..f0667da992be5 100644 --- a/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/plugin/analysis/kuromoji/KuromojiAnalyzerProvider.java +++ b/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/plugin/analysis/kuromoji/KuromojiAnalyzerProvider.java @@ -26,7 +26,7 @@ public class KuromojiAnalyzerProvider extends AbstractIndexAnalyzerProvider<JapaneseAnalyzer> private final JapaneseAnalyzer analyzer; public KuromojiAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { - super(name, settings); + super(name); final Set<?> stopWords = Analysis.parseStopWords(env, settings, JapaneseAnalyzer.getDefaultStopSet()); final JapaneseTokenizer.Mode mode = KuromojiTokenizerFactory.getMode(settings); final UserDictionary userDictionary =
KuromojiTokenizerFactory.getUserDictionary(env, settings); diff --git a/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/plugin/analysis/kuromoji/KuromojiBaseFormFilterFactory.java b/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/plugin/analysis/kuromoji/KuromojiBaseFormFilterFactory.java index 536e6fe993d06..2e8635704c6b0 100644 --- a/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/plugin/analysis/kuromoji/KuromojiBaseFormFilterFactory.java +++ b/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/plugin/analysis/kuromoji/KuromojiBaseFormFilterFactory.java @@ -19,7 +19,7 @@ public class KuromojiBaseFormFilterFactory extends AbstractTokenFilterFactory { public KuromojiBaseFormFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { - super(name, settings); + super(name); } @Override diff --git a/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/plugin/analysis/kuromoji/KuromojiCompletionAnalyzerProvider.java b/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/plugin/analysis/kuromoji/KuromojiCompletionAnalyzerProvider.java index c4970251d2bb6..f7121fb211acc 100644 --- a/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/plugin/analysis/kuromoji/KuromojiCompletionAnalyzerProvider.java +++ b/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/plugin/analysis/kuromoji/KuromojiCompletionAnalyzerProvider.java @@ -22,7 +22,7 @@ public class KuromojiCompletionAnalyzerProvider extends AbstractIndexAnalyzerPro private final JapaneseCompletionAnalyzer analyzer; public KuromojiCompletionAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { - super(name, settings); + super(name); final UserDictionary userDictionary = KuromojiTokenizerFactory.getUserDictionary(env, settings); final Mode mode = KuromojiCompletionFilterFactory.getMode(settings); analyzer = new JapaneseCompletionAnalyzer(userDictionary, mode); diff --git a/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/plugin/analysis/kuromoji/KuromojiCompletionFilterFactory.java b/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/plugin/analysis/kuromoji/KuromojiCompletionFilterFactory.java index 3ec6d08145e69..a7a2d984c3bb7 100644 --- a/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/plugin/analysis/kuromoji/KuromojiCompletionFilterFactory.java +++ b/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/plugin/analysis/kuromoji/KuromojiCompletionFilterFactory.java @@ -22,7 +22,7 @@ public class KuromojiCompletionFilterFactory extends AbstractTokenFilterFactory private final Mode mode; public KuromojiCompletionFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) { - super(name, settings); + super(name); mode = getMode(settings); } diff --git a/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/plugin/analysis/kuromoji/KuromojiKatakanaStemmerFactory.java b/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/plugin/analysis/kuromoji/KuromojiKatakanaStemmerFactory.java index 7c4d3138381fb..c06f8b7a4941c 100644 --- a/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/plugin/analysis/kuromoji/KuromojiKatakanaStemmerFactory.java +++ b/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/plugin/analysis/kuromoji/KuromojiKatakanaStemmerFactory.java @@ -21,7 +21,7 @@ public class KuromojiKatakanaStemmerFactory extends AbstractTokenFilterFactory { private final int minimumLength; public 
KuromojiKatakanaStemmerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { - super(name, settings); + super(name); minimumLength = settings.getAsInt("minimum_length", JapaneseKatakanaStemFilter.DEFAULT_MINIMUM_LENGTH); } diff --git a/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/plugin/analysis/kuromoji/KuromojiNumberFilterFactory.java b/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/plugin/analysis/kuromoji/KuromojiNumberFilterFactory.java index 089b7e10bfae0..605d2c66fac18 100644 --- a/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/plugin/analysis/kuromoji/KuromojiNumberFilterFactory.java +++ b/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/plugin/analysis/kuromoji/KuromojiNumberFilterFactory.java @@ -18,7 +18,7 @@ public class KuromojiNumberFilterFactory extends AbstractTokenFilterFactory { public KuromojiNumberFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { - super(name, settings); + super(name); } @Override diff --git a/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/plugin/analysis/kuromoji/KuromojiPartOfSpeechFilterFactory.java b/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/plugin/analysis/kuromoji/KuromojiPartOfSpeechFilterFactory.java index e8efa781726f8..5ec3023ca4846 100644 --- a/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/plugin/analysis/kuromoji/KuromojiPartOfSpeechFilterFactory.java +++ b/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/plugin/analysis/kuromoji/KuromojiPartOfSpeechFilterFactory.java @@ -27,7 +27,7 @@ public class KuromojiPartOfSpeechFilterFactor private final Set<String> stopTags = new HashSet<>(); public KuromojiPartOfSpeechFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) { - super(name, settings); + super(name); List<String> wordList = Analysis.getWordList(env, settings, "stoptags"); if (wordList != null) { stopTags.addAll(wordList); diff --git a/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/plugin/analysis/kuromoji/KuromojiReadingFormFilterFactory.java b/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/plugin/analysis/kuromoji/KuromojiReadingFormFilterFactory.java index 09ab0bbd4b8d8..8e9f03a0c6261 100644 --- a/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/plugin/analysis/kuromoji/KuromojiReadingFormFilterFactory.java +++ b/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/plugin/analysis/kuromoji/KuromojiReadingFormFilterFactory.java @@ -21,7 +21,7 @@ public class KuromojiReadingFormFilterFactory extends AbstractTokenFilterFactory private final boolean useRomaji; public KuromojiReadingFormFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { - super(name, settings); + super(name); useRomaji = settings.getAsBoolean("use_romaji", false); } diff --git a/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/plugin/analysis/kuromoji/KuromojiTokenizerFactory.java b/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/plugin/analysis/kuromoji/KuromojiTokenizerFactory.java index edb29a8f4c98e..aa978e3e73872 100644 --- a/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/plugin/analysis/kuromoji/KuromojiTokenizerFactory.java +++ b/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/plugin/analysis/kuromoji/KuromojiTokenizerFactory.java @@ -44,7 +44,7 @@ public class KuromojiTokenizerFactory extends
diff --git a/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/plugin/analysis/kuromoji/KuromojiTokenizerFactory.java b/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/plugin/analysis/kuromoji/KuromojiTokenizerFactory.java index edb29a8f4c98e..aa978e3e73872 100644 --- a/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/plugin/analysis/kuromoji/KuromojiTokenizerFactory.java +++ b/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/plugin/analysis/kuromoji/KuromojiTokenizerFactory.java @@ -44,7 +44,7 @@ public class KuromojiTokenizerFactory extends AbstractTokenizerFactory { private boolean discardCompoundToken; public KuromojiTokenizerFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) { - super(indexSettings, settings, name); + super(name); mode = getMode(settings); userDictionary = getUserDictionary(env, settings); discardPunctuation = settings.getAsBoolean("discard_punctuation", true); diff --git a/plugins/analysis-nori/src/main/java/org/elasticsearch/plugin/analysis/nori/NoriAnalyzerProvider.java b/plugins/analysis-nori/src/main/java/org/elasticsearch/plugin/analysis/nori/NoriAnalyzerProvider.java index 180f6aa0a7f96..36094a4f46df1 100644 --- a/plugins/analysis-nori/src/main/java/org/elasticsearch/plugin/analysis/nori/NoriAnalyzerProvider.java +++ b/plugins/analysis-nori/src/main/java/org/elasticsearch/plugin/analysis/nori/NoriAnalyzerProvider.java @@ -29,7 +29,7 @@ public class NoriAnalyzerProvider extends AbstractIndexAnalyzerProvider<KoreanAnalyzer> public NoriAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { - super(name, settings); + super(name); final List<String> tagList = Analysis.getWordList(env, settings, "stoptags"); diff --git a/plugins/analysis-nori/src/main/java/org/elasticsearch/plugin/analysis/nori/NoriNumberFilterFactory.java b/plugins/analysis-nori/src/main/java/org/elasticsearch/plugin/analysis/nori/NoriNumberFilterFactory.java index cbe92156cd765..ac64c5ff26428 100644 --- a/plugins/analysis-nori/src/main/java/org/elasticsearch/plugin/analysis/nori/NoriNumberFilterFactory.java +++ b/plugins/analysis-nori/src/main/java/org/elasticsearch/plugin/analysis/nori/NoriNumberFilterFactory.java @@ -19,7 +19,7 @@ public class NoriNumberFilterFactory extends AbstractTokenFilterFactory { public NoriNumberFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { - super(name, settings); + super(name); } @Override diff --git a/plugins/analysis-nori/src/main/java/org/elasticsearch/plugin/analysis/nori/NoriPartOfSpeechStopFilterFactory.java b/plugins/analysis-nori/src/main/java/org/elasticsearch/plugin/analysis/nori/NoriPartOfSpeechStopFilterFactory.java index dddb485ab0df3..f93a1bd6e9094 100644 --- a/plugins/analysis-nori/src/main/java/org/elasticsearch/plugin/analysis/nori/NoriPartOfSpeechStopFilterFactory.java +++ b/plugins/analysis-nori/src/main/java/org/elasticsearch/plugin/analysis/nori/NoriPartOfSpeechStopFilterFactory.java @@ -26,7 +26,7 @@ public class NoriPartOfSpeechStopFilterFactor private final Set<String> stopTags; public NoriPartOfSpeechStopFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) { - super(name, settings); + super(name); List<String> tagList = Analysis.getWordList(env, settings, "stoptags"); this.stopTags = tagList != null ?
resolvePOSList(tagList) : KoreanPartOfSpeechStopFilter.DEFAULT_STOP_TAGS; } diff --git a/plugins/analysis-nori/src/main/java/org/elasticsearch/plugin/analysis/nori/NoriReadingFormFilterFactory.java b/plugins/analysis-nori/src/main/java/org/elasticsearch/plugin/analysis/nori/NoriReadingFormFilterFactory.java index 1e1b211fdea09..ad5a4a6d1b21a 100644 --- a/plugins/analysis-nori/src/main/java/org/elasticsearch/plugin/analysis/nori/NoriReadingFormFilterFactory.java +++ b/plugins/analysis-nori/src/main/java/org/elasticsearch/plugin/analysis/nori/NoriReadingFormFilterFactory.java @@ -18,7 +18,7 @@ public class NoriReadingFormFilterFactory extends AbstractTokenFilterFactory { public NoriReadingFormFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { - super(name, settings); + super(name); } @Override diff --git a/plugins/analysis-nori/src/main/java/org/elasticsearch/plugin/analysis/nori/NoriTokenizerFactory.java b/plugins/analysis-nori/src/main/java/org/elasticsearch/plugin/analysis/nori/NoriTokenizerFactory.java index ed8458bc94043..40e159b343850 100644 --- a/plugins/analysis-nori/src/main/java/org/elasticsearch/plugin/analysis/nori/NoriTokenizerFactory.java +++ b/plugins/analysis-nori/src/main/java/org/elasticsearch/plugin/analysis/nori/NoriTokenizerFactory.java @@ -38,7 +38,7 @@ public class NoriTokenizerFactory extends AbstractTokenizerFactory { private final boolean discardPunctuation; public NoriTokenizerFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) { - super(indexSettings, settings, name); + super(name); decompoundMode = getMode(settings); userDictionary = getUserDictionary(env, settings, indexSettings); discardPunctuation = settings.getAsBoolean("discard_punctuation", true); diff --git a/plugins/analysis-phonetic/src/main/java/org/elasticsearch/plugin/analysis/phonetic/PhoneticTokenFilterFactory.java b/plugins/analysis-phonetic/src/main/java/org/elasticsearch/plugin/analysis/phonetic/PhoneticTokenFilterFactory.java index 786c6230349a4..60986fe58db6a 100644 --- a/plugins/analysis-phonetic/src/main/java/org/elasticsearch/plugin/analysis/phonetic/PhoneticTokenFilterFactory.java +++ b/plugins/analysis-phonetic/src/main/java/org/elasticsearch/plugin/analysis/phonetic/PhoneticTokenFilterFactory.java @@ -46,7 +46,7 @@ public class PhoneticTokenFilterFactory extends AbstractTokenFilterFactory { private boolean isDaitchMokotoff; public PhoneticTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { - super(name, settings); + super(name); this.languageset = null; this.nametype = null; this.ruletype = null; diff --git a/plugins/analysis-smartcn/src/main/java/org/elasticsearch/plugin/analysis/smartcn/SmartChineseAnalyzerProvider.java b/plugins/analysis-smartcn/src/main/java/org/elasticsearch/plugin/analysis/smartcn/SmartChineseAnalyzerProvider.java index 668b94d0ca972..9dceac60b3c31 100644 --- a/plugins/analysis-smartcn/src/main/java/org/elasticsearch/plugin/analysis/smartcn/SmartChineseAnalyzerProvider.java +++ b/plugins/analysis-smartcn/src/main/java/org/elasticsearch/plugin/analysis/smartcn/SmartChineseAnalyzerProvider.java @@ -20,7 +20,7 @@ public class SmartChineseAnalyzerProvider extends AbstractIndexAnalyzerProvider< private final SmartChineseAnalyzer analyzer; public SmartChineseAnalyzerProvider(IndexSettings indexSettings, Environment environment, String name, Settings settings) { - super(name, settings); + super(name); analyzer = new 
SmartChineseAnalyzer(SmartChineseAnalyzer.getDefaultStopSet()); } diff --git a/plugins/analysis-smartcn/src/main/java/org/elasticsearch/plugin/analysis/smartcn/SmartChineseNoOpTokenFilterFactory.java b/plugins/analysis-smartcn/src/main/java/org/elasticsearch/plugin/analysis/smartcn/SmartChineseNoOpTokenFilterFactory.java index 55cda67852272..41869b1a7222e 100644 --- a/plugins/analysis-smartcn/src/main/java/org/elasticsearch/plugin/analysis/smartcn/SmartChineseNoOpTokenFilterFactory.java +++ b/plugins/analysis-smartcn/src/main/java/org/elasticsearch/plugin/analysis/smartcn/SmartChineseNoOpTokenFilterFactory.java @@ -18,7 +18,7 @@ public class SmartChineseNoOpTokenFilterFactory extends AbstractTokenFilterFactory { public SmartChineseNoOpTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { - super(name, settings); + super(name); } @Override diff --git a/plugins/analysis-smartcn/src/main/java/org/elasticsearch/plugin/analysis/smartcn/SmartChineseStopTokenFilterFactory.java b/plugins/analysis-smartcn/src/main/java/org/elasticsearch/plugin/analysis/smartcn/SmartChineseStopTokenFilterFactory.java index 2463ad4a2c188..5688971d02865 100644 --- a/plugins/analysis-smartcn/src/main/java/org/elasticsearch/plugin/analysis/smartcn/SmartChineseStopTokenFilterFactory.java +++ b/plugins/analysis-smartcn/src/main/java/org/elasticsearch/plugin/analysis/smartcn/SmartChineseStopTokenFilterFactory.java @@ -35,7 +35,7 @@ public class SmartChineseStopTokenFilterFactory extends AbstractTokenFilterFacto private final boolean removeTrailing; public SmartChineseStopTokenFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) { - super(name, settings); + super(name); this.ignoreCase = settings.getAsBoolean("ignore_case", false); this.removeTrailing = settings.getAsBoolean("remove_trailing", true); this.stopWords = Analysis.parseWords( diff --git a/plugins/analysis-smartcn/src/main/java/org/elasticsearch/plugin/analysis/smartcn/SmartChineseTokenizerTokenizerFactory.java b/plugins/analysis-smartcn/src/main/java/org/elasticsearch/plugin/analysis/smartcn/SmartChineseTokenizerTokenizerFactory.java index 2545c9c7d94e8..2b0a9bfe00341 100644 --- a/plugins/analysis-smartcn/src/main/java/org/elasticsearch/plugin/analysis/smartcn/SmartChineseTokenizerTokenizerFactory.java +++ b/plugins/analysis-smartcn/src/main/java/org/elasticsearch/plugin/analysis/smartcn/SmartChineseTokenizerTokenizerFactory.java @@ -19,7 +19,7 @@ public class SmartChineseTokenizerTokenizerFactory extends AbstractTokenizerFactory { public SmartChineseTokenizerTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { - super(indexSettings, settings, name); + super(name); } @Override diff --git a/plugins/analysis-stempel/src/main/java/org/elasticsearch/index/analysis/pl/PolishAnalyzerProvider.java b/plugins/analysis-stempel/src/main/java/org/elasticsearch/index/analysis/pl/PolishAnalyzerProvider.java index 68e7298473cd4..73f42930e8613 100644 --- a/plugins/analysis-stempel/src/main/java/org/elasticsearch/index/analysis/pl/PolishAnalyzerProvider.java +++ b/plugins/analysis-stempel/src/main/java/org/elasticsearch/index/analysis/pl/PolishAnalyzerProvider.java @@ -20,7 +20,7 @@ public class PolishAnalyzerProvider extends AbstractIndexAnalyzerProvider(); + for (int i = 0; i < pluginCount; i++) { + pluginNames.add( + Objects.requireNonNull(nodesInfoResponse.evaluateExact("nodes", nodeId, "plugins", Integer.toString(i), "name")) + ); + 
} + assertThat(pluginNames, hasItem("discovery-ec2")); + } + } + +} diff --git a/plugins/discovery-ec2/src/yamlRestTest/java/org/elasticsearch/discovery/ec2/CloudAwsClientYamlTestSuiteIT.java b/plugins/discovery-ec2/src/yamlRestTest/java/org/elasticsearch/discovery/ec2/CloudAwsClientYamlTestSuiteIT.java deleted file mode 100644 index 7a8e4eed9f928..0000000000000 --- a/plugins/discovery-ec2/src/yamlRestTest/java/org/elasticsearch/discovery/ec2/CloudAwsClientYamlTestSuiteIT.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". - */ - -package org.elasticsearch.discovery.ec2; - -import com.carrotsearch.randomizedtesting.annotations.Name; -import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; - -import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; -import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; - -public class CloudAwsClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { - - public CloudAwsClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { - super(testCandidate); - } - - @ParametersFactory - public static Iterable<Object[]> parameters() throws Exception { - return ESClientYamlSuiteTestCase.createParameters(); - } -} diff --git a/plugins/discovery-ec2/src/yamlRestTest/resources/rest-api-spec/test/discovery_ec2/10_basic.yml b/plugins/discovery-ec2/src/yamlRestTest/resources/rest-api-spec/test/discovery_ec2/10_basic.yml deleted file mode 100644 index ba51c623fe888..0000000000000 --- a/plugins/discovery-ec2/src/yamlRestTest/resources/rest-api-spec/test/discovery_ec2/10_basic.yml +++ /dev/null @@ -1,16 +0,0 @@ -# Integration tests for Discovery EC2 component -# -"Discovery EC2 loaded": - - skip: - reason: "contains is a newly added assertion" - features: contains - - do: - cluster.state: {} - - # Get master node id - - set: { master_node: master } - - - do: - nodes.info: {} - - - contains: { nodes.$master.plugins: { name: discovery-ec2 } }
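The deleted YAML check above is superseded by a Java REST test whose beginning did not survive intact in this excerpt — only its tail (the pluginNames loop and the hasItem assertion) is visible a few hunks earlier. A sketch of what such a test plausibly looks like; the class name, test method, and request path are assumptions, not the PR's actual code, and only the loop body is grounded in the visible fragment:

    package org.elasticsearch.discovery.ec2;

    import org.elasticsearch.client.Request;
    import org.elasticsearch.test.rest.ESRestTestCase;
    import org.elasticsearch.test.rest.yaml.ObjectPath;

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;
    import java.util.Map;
    import java.util.Objects;

    import static org.hamcrest.Matchers.hasItem;

    // Hypothetical class name; the real replacement test is not fully visible in this diff.
    public class DiscoveryEc2PluginLoadedIT extends ESRestTestCase {

        public void testPluginIsLoaded() throws IOException {
            // Ask every node for its loaded plugins, mirroring the old nodes.info YAML step.
            ObjectPath nodesInfoResponse = ObjectPath.createFromResponse(
                client().performRequest(new Request("GET", "/_nodes/plugins"))
            );
            Map<String, Object> nodes = nodesInfoResponse.evaluate("nodes");
            for (String nodeId : nodes.keySet()) {
                // Collect this node's plugin names, as the surviving fragment does.
                List<?> plugins = nodesInfoResponse.evaluateExact("nodes", nodeId, "plugins");
                List<String> pluginNames = new ArrayList<>();
                for (int i = 0; i < plugins.size(); i++) {
                    pluginNames.add(
                        Objects.requireNonNull(nodesInfoResponse.evaluateExact("nodes", nodeId, "plugins", Integer.toString(i), "name"))
                    );
                }
                assertThat(pluginNames, hasItem("discovery-ec2"));
            }
        }
    }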
supported in Hdfs repositories"); - } - private Path buildHdfsPath(BlobPath blobPath) { final Path path = translateToHdfsPath(blobPath); if (readOnly == false) { diff --git a/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsBlobStoreRepositoryTests.java b/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsBlobStoreRepositoryTests.java index 17927b02a08dc..3e1c112a4d9f7 100644 --- a/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsBlobStoreRepositoryTests.java +++ b/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsBlobStoreRepositoryTests.java @@ -46,11 +46,6 @@ public void testSnapshotAndRestore() throws Exception { testSnapshotAndRestore(false); } - @Override - public void testBlobStoreBulkDeletion() throws Exception { - // HDFS does not implement bulk deletion from different BlobContainers - } - @Override protected Collection> nodePlugins() { return Collections.singletonList(HdfsPlugin.class); diff --git a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartDownsampleIT.java b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartDownsampleIT.java index d98d53baf9015..f907870fc8254 100644 --- a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartDownsampleIT.java +++ b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartDownsampleIT.java @@ -263,7 +263,7 @@ private String getRollupIndexName() throws IOException { if (asMap.size() == 1) { return (String) asMap.keySet().toArray()[0]; } - logger.warn("--> No matching rollup name for path [%s]", endpoint); + logger.warn("--> No matching rollup name for path [{}]", endpoint); return null; } diff --git a/qa/logging-config/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java b/qa/logging-config/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java index 1066bf1360e41..ed6205c7a5208 100644 --- a/qa/logging-config/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java +++ b/qa/logging-config/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java @@ -125,14 +125,14 @@ public void testDeprecatedMessageWithoutXOpaqueId() throws IOException { jsonLogs, contains( allOf( - hasEntry("event.dataset", "deprecation.elasticsearch"), + hasEntry("event.dataset", "elasticsearch.deprecation"), hasEntry("log.level", "CRITICAL"), hasEntry("log.logger", "org.elasticsearch.deprecation.test"), hasEntry("elasticsearch.cluster.name", "elasticsearch"), hasEntry("elasticsearch.node.name", "sample-name"), hasEntry("message", "deprecated message1"), hasEntry("data_stream.type", "logs"), - hasEntry("data_stream.dataset", "deprecation.elasticsearch"), + hasEntry("data_stream.dataset", "elasticsearch.deprecation"), hasEntry("data_stream.namespace", "default"), hasKey("ecs.version"), hasEntry(DeprecatedMessage.KEY_FIELD_NAME, "a key"), @@ -168,8 +168,8 @@ public void testCompatibleLog() throws Exception { contains( allOf( hasEntry("log.level", "CRITICAL"), - hasEntry("event.dataset", "deprecation.elasticsearch"), - hasEntry("data_stream.dataset", "deprecation.elasticsearch"), + hasEntry("event.dataset", "elasticsearch.deprecation"), + hasEntry("data_stream.dataset", "elasticsearch.deprecation"), hasEntry("data_stream.namespace", "default"), hasEntry("data_stream.type", "logs"), hasEntry("log.logger", "org.elasticsearch.deprecation.test"), @@ -186,8 +186,8 @@ 
diff --git a/qa/logging-config/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java b/qa/logging-config/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java index 1066bf1360e41..ed6205c7a5208 100644 --- a/qa/logging-config/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java +++ b/qa/logging-config/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java @@ -125,14 +125,14 @@ public void testDeprecatedMessageWithoutXOpaqueId() throws IOException { jsonLogs, contains( allOf( - hasEntry("event.dataset", "deprecation.elasticsearch"), + hasEntry("event.dataset", "elasticsearch.deprecation"), hasEntry("log.level", "CRITICAL"), hasEntry("log.logger", "org.elasticsearch.deprecation.test"), hasEntry("elasticsearch.cluster.name", "elasticsearch"), hasEntry("elasticsearch.node.name", "sample-name"), hasEntry("message", "deprecated message1"), hasEntry("data_stream.type", "logs"), - hasEntry("data_stream.dataset", "deprecation.elasticsearch"), + hasEntry("data_stream.dataset", "elasticsearch.deprecation"), hasEntry("data_stream.namespace", "default"), hasKey("ecs.version"), hasEntry(DeprecatedMessage.KEY_FIELD_NAME, "a key"), @@ -168,8 +168,8 @@ public void testCompatibleLog() throws Exception { contains( allOf( hasEntry("log.level", "CRITICAL"), - hasEntry("event.dataset", "deprecation.elasticsearch"), - hasEntry("data_stream.dataset", "deprecation.elasticsearch"), + hasEntry("event.dataset", "elasticsearch.deprecation"), + hasEntry("data_stream.dataset", "elasticsearch.deprecation"), hasEntry("data_stream.namespace", "default"), hasEntry("data_stream.type", "logs"), hasEntry("log.logger", "org.elasticsearch.deprecation.test"), @@ -186,8 +186,8 @@ public void testCompatibleLog() throws Exception { allOf( hasEntry("log.level", "CRITICAL"), // event.dataset and data_stream.dataset have to be the same across the data stream - hasEntry("event.dataset", "deprecation.elasticsearch"), - hasEntry("data_stream.dataset", "deprecation.elasticsearch"), + hasEntry("event.dataset", "elasticsearch.deprecation"), + hasEntry("data_stream.dataset", "elasticsearch.deprecation"), hasEntry("data_stream.namespace", "default"), hasEntry("data_stream.type", "logs"), hasEntry("log.logger", "org.elasticsearch.deprecation.test"), @@ -240,8 +240,8 @@ public void testParseFieldEmittingDeprecatedLogs() throws Exception { // deprecation log for field deprecated_name allOf( hasEntry("log.level", "WARN"), - hasEntry("event.dataset", "deprecation.elasticsearch"), - hasEntry("data_stream.dataset", "deprecation.elasticsearch"), + hasEntry("event.dataset", "elasticsearch.deprecation"), + hasEntry("data_stream.dataset", "elasticsearch.deprecation"), hasEntry("data_stream.namespace", "default"), hasEntry("data_stream.type", "logs"), hasEntry("log.logger", "org.elasticsearch.deprecation.xcontent.ParseField"), @@ -258,8 +258,8 @@ public void testParseFieldEmittingDeprecatedLogs() throws Exception { // deprecation log for field deprecated_name2 (note it is not being throttled) allOf( hasEntry("log.level", "WARN"), - hasEntry("event.dataset", "deprecation.elasticsearch"), - hasEntry("data_stream.dataset", "deprecation.elasticsearch"), + hasEntry("event.dataset", "elasticsearch.deprecation"), + hasEntry("data_stream.dataset", "elasticsearch.deprecation"), hasEntry("data_stream.namespace", "default"), hasEntry("data_stream.type", "logs"), hasEntry("log.logger", "org.elasticsearch.deprecation.xcontent.ParseField"), @@ -276,8 +276,8 @@ public void testParseFieldEmittingDeprecatedLogs() throws Exception { // compatible log line allOf( hasEntry("log.level", "CRITICAL"), - hasEntry("event.dataset", "deprecation.elasticsearch"), - hasEntry("data_stream.dataset", "deprecation.elasticsearch"), + hasEntry("event.dataset", "elasticsearch.deprecation"), + hasEntry("data_stream.dataset", "elasticsearch.deprecation"), hasEntry("data_stream.namespace", "default"), hasEntry("data_stream.type", "logs"), hasEntry("log.logger", "org.elasticsearch.deprecation.xcontent.ParseField"), @@ -327,14 +327,14 @@ public void testDeprecatedMessage() throws Exception { jsonLogs, contains( allOf( - hasEntry("event.dataset", "deprecation.elasticsearch"), + hasEntry("event.dataset", "elasticsearch.deprecation"), hasEntry("log.level", "WARN"), hasEntry("log.logger", "org.elasticsearch.deprecation.test"), hasEntry("elasticsearch.cluster.name", "elasticsearch"), hasEntry("elasticsearch.node.name", "sample-name"), hasEntry("message", "deprecated message1"), hasEntry("data_stream.type", "logs"), - hasEntry("data_stream.dataset", "deprecation.elasticsearch"), + hasEntry("data_stream.dataset", "elasticsearch.deprecation"), hasEntry("data_stream.namespace", "default"), hasKey("ecs.version"), hasEntry(DeprecatedMessage.KEY_FIELD_NAME, "someKey"), @@ -579,7 +579,7 @@ public void testDuplicateLogMessages() throws Exception { jsonLogs, contains( allOf( -
hasEntry("event.dataset", "deprecation.elasticsearch"), + hasEntry("event.dataset", "elasticsearch.deprecation"), hasEntry("log.level", "CRITICAL"), hasEntry("log.logger", "org.elasticsearch.deprecation.test"), hasEntry("elasticsearch.cluster.name", "elasticsearch"), @@ -622,7 +622,7 @@ public void testDuplicateLogMessages() throws Exception { hasEntry("elasticsearch.event.category", "other") ), allOf( - hasEntry("event.dataset", "deprecation.elasticsearch"), + hasEntry("event.dataset", "elasticsearch.deprecation"), hasEntry("log.level", "CRITICAL"), hasEntry("log.logger", "org.elasticsearch.deprecation.test"), hasEntry("elasticsearch.cluster.name", "elasticsearch"), diff --git a/qa/logging-config/src/test/resources/org/elasticsearch/common/logging/json_layout/log4j2.properties b/qa/logging-config/src/test/resources/org/elasticsearch/common/logging/json_layout/log4j2.properties index 46baac4f1433c..b00caca66d03c 100644 --- a/qa/logging-config/src/test/resources/org/elasticsearch/common/logging/json_layout/log4j2.properties +++ b/qa/logging-config/src/test/resources/org/elasticsearch/common/logging/json_layout/log4j2.properties @@ -15,14 +15,13 @@ appender.deprecated.name = deprecated appender.deprecated.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecated.json # Intentionally follows a different pattern to above appender.deprecated.layout.type = ECSJsonLayout -appender.deprecated.layout.dataset = deprecation.elasticsearch +appender.deprecated.layout.dataset = elasticsearch.deprecation appender.deprecated.filter.rate_limit.type = RateLimitingFilter appender.deprecatedconsole.type = Console appender.deprecatedconsole.name = deprecatedconsole appender.deprecatedconsole.layout.type = ECSJsonLayout -# Intentionally follows a different pattern to above -appender.deprecatedconsole.layout.dataset = deprecation.elasticsearch +appender.deprecatedconsole.layout.dataset = elasticsearch.deprecation appender.deprecatedconsole.filter.rate_limit.type = RateLimitingFilter diff --git a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/DownsampleIT.java b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/DownsampleIT.java index bca0c26ad2c32..b1212913b7fb0 100644 --- a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/DownsampleIT.java +++ b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/DownsampleIT.java @@ -238,7 +238,7 @@ private String getRollupIndexName() throws IOException { if (asMap.size() == 1) { return (String) asMap.keySet().toArray()[0]; } - logger.warn("--> No matching rollup name for path [%s]", endpoint); + logger.warn("--> No matching rollup name for path [{}]", endpoint); return null; } diff --git a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/BulkRestIT.java b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/BulkRestIT.java index 369d0824bdb28..3faa88339f0a3 100644 --- a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/BulkRestIT.java +++ b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/BulkRestIT.java @@ -74,8 +74,7 @@ public void testBulkInvalidIndexNameString() throws IOException { ResponseException responseException = expectThrows(ResponseException.class, () -> getRestClient().performRequest(request)); assertThat(responseException.getResponse().getStatusLine().getStatusCode(), equalTo(BAD_REQUEST.getStatus())); - assertThat(responseException.getMessage(), containsString("could not parse bulk request 
body")); - assertThat(responseException.getMessage(), containsString("json_parse_exception")); + assertThat(responseException.getMessage(), containsString("x_content_parse_exception")); assertThat(responseException.getMessage(), containsString("Invalid UTF-8")); } diff --git a/rest-api-spec/build.gradle b/rest-api-spec/build.gradle index e2af894eb0939..7347d9c1312dd 100644 --- a/rest-api-spec/build.gradle +++ b/rest-api-spec/build.gradle @@ -67,4 +67,6 @@ tasks.named("yamlRestCompatTestTransform").configure ({ task -> task.skipTest("logsdb/20_source_mapping/include/exclude is supported with stored _source", "no longer serialize source_mode") task.skipTest("logsdb/20_source_mapping/synthetic _source is default", "no longer serialize source_mode") task.skipTest("search/520_fetch_fields/fetch _seq_no via fields", "error code is changed from 5xx to 400 in 9.0") + task.skipTest("search.vectors/41_knn_search_bbq_hnsw/Test knn search", "Scoring has changed in latest versions") + task.skipTest("search.vectors/42_knn_search_bbq_flat/Test knn search", "Scoring has changed in latest versions") }) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/bulk.json b/rest-api-spec/src/main/resources/rest-api-spec/api/bulk.json index 9ced5d3e8c454..f9c8041d7221f 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/bulk.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/bulk.json @@ -56,10 +56,6 @@ "type":"time", "description":"Explicit operation timeout" }, - "type":{ - "type":"string", - "description":"Default document type for items which don't provide one" - }, "_source":{ "type":"list", "description":"True or false to return the _source field or not, or default list of fields to return, can be overridden on each sub-request" @@ -78,11 +74,11 @@ }, "require_alias": { "type": "boolean", - "description": "Sets require_alias for all incoming documents. Defaults to unset (false)" + "description": "If true, the request’s actions must target an index alias. Defaults to false." }, "require_data_stream": { "type": "boolean", - "description": "When true, requires the destination to be a data stream (existing or to-be-created). Default is false" + "description": "If true, the request's actions must target a data stream (existing or to-be-created). Default to false" }, "list_executed_pipelines": { "type": "boolean", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.aliases.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.aliases.json index d3856b455efd1..9f97fe6280dc0 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.aliases.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.aliases.json @@ -65,6 +65,11 @@ ], "default": "all", "description":"Whether to expand wildcard expression to concrete indices that are open, closed or both." + }, + "master_timeout":{ + "type":"time", + "description":"Timeout for waiting for new cluster state in case it is blocked", + "default":"30s" } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists_alias.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists_alias.json index 7d7a9c96c6419..7c855335efd00 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists_alias.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists_alias.json @@ -61,6 +61,11 @@ ], "default":"all", "description":"Whether to expand wildcard expression to concrete indices that are open, closed or both." 
+ }, + "master_timeout":{ + "type":"time", + "description":"Timeout for waiting for new cluster state in case it is blocked", + "default":"30s" } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_alias.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_alias.json index dc02a65adb068..a360582a44a04 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_alias.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_alias.json @@ -79,6 +79,11 @@ ], "default": "all", "description":"Whether to expand wildcard expression to concrete indices that are open, closed or both." + }, + "master_timeout":{ + "type":"time", + "description":"Timeout for waiting for new cluster state in case it is blocked", + "default":"30s" } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/migrate.get_reindex_status.json b/rest-api-spec/src/main/resources/rest-api-spec/api/migrate.get_reindex_status.json new file mode 100644 index 0000000000000..057269598a7d8 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/migrate.get_reindex_status.json @@ -0,0 +1,31 @@ +{ + "migrate.get_reindex_status":{ + "documentation":{ + "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/data-stream-reindex.html", + "description":"This API returns the status of a migration reindex attempt for a data stream or index" + }, + "stability":"experimental", + "visibility":"private", + "headers":{ + "accept": [ "application/json"], + "content_type": ["application/json"] + }, + "url":{ + "paths":[ + { + "path":"/_migration/reindex/{index}/_status", + "methods":[ + "GET" + ], + "parts":{ + "index":{ + "type":"string", + "description":"The index or data stream name" + } + } + } + ] + } + } +} + diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/migrate.reindex.json b/rest-api-spec/src/main/resources/rest-api-spec/api/migrate.reindex.json new file mode 100644 index 0000000000000..149a90bc198b0 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/migrate.reindex.json @@ -0,0 +1,29 @@ +{ + "migrate.reindex":{ + "documentation":{ + "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/data-stream-reindex.html", + "description":"This API reindexes all legacy backing indices for a data stream. It does this in a persistent task. 
The persistent task id is returned immediately, and the reindexing work is completed in that task" + }, + "stability":"experimental", + "visibility":"private", + "headers":{ + "accept": [ "application/json"], + "content_type": ["application/json"] + }, + "url":{ + "paths":[ + { + "path":"/_migration/reindex", + "methods":[ + "POST" + ] + } + ] + }, + "body":{ + "description":"The body contains the fields `mode` and `source.index`, where the only mode currently supported is `upgrade`, and the `source.index` must be a data stream name", + "required":true + } + } +} + diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.retrievers/20_knn_retriever.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.retrievers/20_knn_retriever.yml index d08a8e2a6d39c..e49f0634a4887 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.retrievers/20_knn_retriever.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.retrievers/20_knn_retriever.yml @@ -18,7 +18,7 @@ setup: dims: 5 index: true index_options: - type: hnsw + type: int8_hnsw similarity: l2_norm - do: @@ -73,3 +73,59 @@ setup: - match: {hits.total.value: 1} - match: {hits.hits.0._id: "3"} - match: {hits.hits.0.fields.name.0: "rabbit.jpg"} + +--- +"Vector rescoring has no effect for non-quantized vectors and provides same results as non-rescored knn": + - requires: + reason: 'Quantized vector rescoring is required' + test_runner_features: [capabilities] + capabilities: + - method: GET + path: /_search + capabilities: [knn_quantized_vector_rescore] + - skip: + features: "headers" + + # Rescore + - do: + headers: + Content-Type: application/json + search: + rest_total_hits_as_int: true + index: index1 + body: + knn: + field: vector + query_vector: [2, 2, 2, 2, 3] + k: 3 + num_candidates: 3 + rescore_vector: + num_candidates_factor: 1.5 + + # Get rescoring scores - hit ordering may change depending on how things are distributed + - match: { hits.total: 3 } + - set: { hits.hits.0._score: rescore_score0 } + - set: { hits.hits.1._score: rescore_score1 } + - set: { hits.hits.2._score: rescore_score2 } + + # Exact knn via script score + - do: + headers: + Content-Type: application/json + search: + rest_total_hits_as_int: true + index: index1 + body: + query: + script_score: + query: {match_all: {} } + script: + source: "1.0 / (1.0 + Math.pow(l2norm(params.query_vector, 'vector'), 2.0))" + params: + query_vector: [2, 2, 2, 2, 3] + + # Compare scores as hit IDs may change depending on how things are distributed + - match: { hits.total: 3 } + - match: { hits.hits.0._score: $rescore_score0 } + - match: { hits.hits.1._score: $rescore_score1 } + - match: { hits.hits.2._score: $rescore_score2 }
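The test above can assert exact score equality because, for a dense_vector field with l2_norm similarity, the score of a vector at Euclidean distance d from the query is 1 / (1 + d^2) — precisely what its script_score source computes, so a rescored (or non-quantized) kNN hit and the exact brute-force computation must agree. For intuition, a plain-Java sketch of that scoring function (an illustration, not PR code):

    // Score used for l2_norm similarity: 1 / (1 + squared_l2_distance), matching the
    // painless script "1.0 / (1.0 + Math.pow(l2norm(params.query_vector, 'vector'), 2.0))".
    static double l2NormScore(float[] query, float[] doc) {
        double squaredDistance = 0.0;
        for (int i = 0; i < query.length; i++) {
            double diff = query[i] - doc[i];
            squaredDistance += diff * diff;
        }
        return 1.0 / (1.0 + squaredDistance);
    }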
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/210_knn_search_profile.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/210_knn_search_profile.yml new file mode 100644 index 0000000000000..d4bf5e7e9807f --- /dev/null +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/210_knn_search_profile.yml @@ -0,0 +1,137 @@ +setup: + - requires: + reason: 'Quantized vector rescoring is required' + test_runner_features: [ capabilities ] + capabilities: + - method: GET + path: /_search + capabilities: [ knn_quantized_vector_rescore ] + - skip: + features: "headers" + + - do: + indices.create: + index: bbq_hnsw + body: + settings: + index: + number_of_shards: 1 + mappings: + properties: + vector + type: dense_vector + dims: 64 + index: true + similarity: max_inner_product + index_options: + type: bbq_hnsw + + - do: + index: + index: bbq_hnsw + id: "1" + body: + vector: [0.077, 0.32 , -0.205, 0.63 , 0.032, 0.201, 0.167, -0.313, + 0.176, 0.531, -0.375, 0.334, -0.046, 0.078, -0.349, 0.272, + 0.307, -0.083, 0.504, 0.255, -0.404, 0.289, -0.226, -0.132, + -0.216, 0.49 , 0.039, 0.507, -0.307, 0.107, 0.09 , -0.265, + -0.285, 0.336, -0.272, 0.369, -0.282, 0.086, -0.132, 0.475, + -0.224, 0.203, 0.439, 0.064, 0.246, -0.396, 0.297, 0.242, + -0.028, 0.321, -0.022, -0.009, -0.001 , 0.031, -0.533, 0.45, + -0.683, 1.331, 0.194, -0.157, -0.1 , -0.279, -0.098, -0.176] + # Flush in order to provoke a merge later + - do: + indices.flush: + index: bbq_hnsw + + - do: + index: + index: bbq_hnsw + id: "2" + body: + vector: [0.196, 0.514, 0.039, 0.555, -0.042, 0.242, 0.463, -0.348, + -0.08 , 0.442, -0.067, -0.05 , -0.001, 0.298, -0.377, 0.048, + 0.307, 0.159, 0.278, 0.119, -0.057, 0.333, -0.289, -0.438, + -0.014, 0.361, -0.169, 0.292, -0.229, 0.123, 0.031, -0.138, + -0.139, 0.315, -0.216, 0.322, -0.445, -0.059, 0.071, 0.429, + -0.602, -0.142, 0.11 , 0.192, 0.259, -0.241, 0.181, -0.166, + 0.082, 0.107, -0.05 , 0.155, 0.011, 0.161, -0.486, 0.569, + -0.489, 0.901, 0.208, 0.011, -0.209, -0.153, -0.27 , -0.013] + # Flush in order to provoke a merge later + - do: + indices.flush: + index: bbq_hnsw + + - do: + index: + index: bbq_hnsw + id: "3" + body: + name: rabbit.jpg + vector: [0.139, 0.178, -0.117, 0.399, 0.014, -0.139, 0.347, -0.33 , + 0.139, 0.34 , -0.052, -0.052, -0.249, 0.327, -0.288, 0.049, + 0.464, 0.338, 0.516, 0.247, -0.104, 0.259, -0.209, -0.246, + -0.11 , 0.323, 0.091, 0.442, -0.254, 0.195, -0.109, -0.058, + -0.279, 0.402, -0.107, 0.308, -0.273, 0.019, 0.082, 0.399, + -0.658, -0.03 , 0.276, 0.041, 0.187, -0.331, 0.165, 0.017, + 0.171, -0.203, -0.198, 0.115, -0.007, 0.337, -0.444, 0.615, + -0.657, 1.285, 0.2 , -0.062, 0.038, 0.089, -0.068, -0.058] + # Flush in order to provoke a merge later + - do: + indices.flush: + index: bbq_hnsw + + - do: + indices.forcemerge: + index: bbq_hnsw + max_num_segments: 1 +--- +"Profile rescored knn search": + + - do: + search: + index: bbq_hnsw + body: + profile: true + knn: + field: vector + query_vector: [0.128, 0.067, -0.08 , 0.395, -0.11 , -0.259, 0.473, -0.393, + 0.292, 0.571, -0.491, 0.444, -0.288, 0.198, -0.343, 0.015, + 0.232, 0.088, 0.228, 0.151, -0.136, 0.236, -0.273, -0.259, + -0.217, 0.359, -0.207, 0.352, -0.142, 0.192, -0.061, -0.17 , + -0.343, 0.189, -0.221, 0.32 , -0.301, -0.1 , 0.005, 0.232, + -0.344, 0.136, 0.252, 0.157, -0.13 , -0.244, 0.193, -0.034, + -0.12 , -0.193, -0.102, 0.252, -0.185, -0.167, -0.575, 0.582, + -0.426, 0.983, 0.212, 0.204, 0.03 , -0.276, -0.425, -0.158] + k: 3 + num_candidates: 3 + "rescore_vector": + "num_candidates_factor": 2.0 + + # We expect the knn search ops + rescoring num_candidates (for rescoring) per shard + - match: { profile.shards.0.dfs.knn.0.vector_operations_count: 6 } + + # Search with similarity to check that the number of operations is propagated correctly + - do: + search: + index: bbq_hnsw + body: + profile: true + knn: + field: vector + query_vector: [0.128, 0.067, -0.08 , 0.395, -0.11 , -0.259, 0.473, -0.393, + 0.292, 0.571, -0.491, 0.444, -0.288, 0.198, -0.343, 0.015, + 0.232, 0.088, 0.228, 0.151, -0.136, 0.236, -0.273, -0.259, + -0.217, 0.359, -0.207, 0.352, -0.142, 0.192, -0.061, -0.17 , + -0.343, 0.189, -0.221, 0.32 , -0.301, -0.1 , 0.005, 0.232, + -0.344, 0.136, 0.252, 0.157, -0.13 , -0.244, 0.193, -0.034,
-0.12 , -0.193, -0.102, 0.252, -0.185, -0.167, -0.575, 0.582, + -0.426, 0.983, 0.212, 0.204, 0.03 , -0.276, -0.425, -0.158] + k: 3 + num_candidates: 3 + similarity: 100000 + "rescore_vector": + "num_candidates_factor": 2.0 + + # We expect the knn search ops + rescoring num_candidates (for rescoring) per shard + - match: { profile.shards.0.dfs.knn.0.vector_operations_count: 6 } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/30_multi_dense_vector.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/30_rank_vectors.yml similarity index 88% rename from rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/30_multi_dense_vector.yml rename to rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/30_rank_vectors.yml index 80d1d25dfcbd8..ecf34f46c3383 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/30_multi_dense_vector.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/30_rank_vectors.yml @@ -3,9 +3,9 @@ setup: capabilities: - method: POST path: /_search - capabilities: [ multi_dense_vector_field_mapper ] + capabilities: [ rank_vectors_field_mapper ] test_runner_features: capabilities - reason: "Support for multi dense vector field mapper capability required" + reason: "Support for rank vectors field mapper capability required" --- "Test create multi-vector field": - do: @@ -15,7 +15,7 @@ setup: mappings: properties: vector1: - type: multi_dense_vector + type: rank_vectors dims: 3 - do: index: @@ -48,7 +48,7 @@ setup: name: type: keyword vector1: - type: multi_dense_vector + type: rank_vectors - do: index: index: test @@ -88,7 +88,7 @@ setup: mappings: properties: vector1: - type: multi_dense_vector + type: rank_vectors - do: catch: bad_request index: @@ -105,7 +105,7 @@ setup: mappings: properties: vector1: - type: multi_dense_vector + type: rank_vectors dims: 3 - do: catch: bad_request @@ -123,7 +123,7 @@ setup: mappings: properties: vector1: - type: multi_dense_vector + type: rank_vectors dims: 3 - do: catch: bad_request diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/40_knn_search.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/40_knn_search.yml index b3d86a066550e..7d4690204acc7 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/40_knn_search.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/40_knn_search.yml @@ -215,8 +215,11 @@ setup: --- "kNN search in _knn_search endpoint": - skip: - features: [ "allowed_warnings" ] + features: [ "allowed_warnings", "headers" ] - do: + headers: + Content-Type: "application/vnd.elasticsearch+json;compatible-with=8" + Accept: "application/vnd.elasticsearch+json;compatible-with=8" allowed_warnings: - "The kNN search API has been replaced by the `knn` option in the search API." knn_search: @@ -240,8 +243,11 @@ setup: - requires: cluster_features: "gte_v8.2.0" reason: 'kNN with filtering added in 8.2' - test_runner_features: [ "allowed_warnings" ] + test_runner_features: [ "allowed_warnings", "headers" ] - do: + headers: + Content-Type: "application/vnd.elasticsearch+json;compatible-with=8" + Accept: "application/vnd.elasticsearch+json;compatible-with=8" allowed_warnings: - "The kNN search API has been replaced by the `knn` option in the search API."
knn_search: @@ -262,6 +268,9 @@ setup: - match: { hits.hits.0.fields.name.0: "rabbit.jpg" } - do: + headers: + Content-Type: "application/vnd.elasticsearch+json;compatible-with=8" + Accept: "application/vnd.elasticsearch+json;compatible-with=8" allowed_warnings: - "The kNN search API has been replaced by the `knn` option in the search API." knn_search: @@ -541,3 +550,58 @@ setup: num_candidates: 3 - match: { hits.total.value: 0 } +--- +"Vector rescoring has no effect for non-quantized vectors and provides same results as non-rescored knn": + - requires: + reason: 'Quantized vector rescoring is required' + test_runner_features: [capabilities] + capabilities: + - method: GET + path: /_search + capabilities: [knn_quantized_vector_rescore] + - skip: + features: "headers" + + # Non-rescored knn + - do: + headers: + Content-Type: application/json + search: + rest_total_hits_as_int: true + index: test + body: + fields: [ "name" ] + knn: + field: vector + query_vector: [-0.5, 90.0, -10, 14.8, -156.0] + k: 3 + num_candidates: 3 + + # Get scores - hit ordering may change depending on how things are distributed + - match: { hits.total: 3 } + - set: { hits.hits.0._score: knn_score0 } + - set: { hits.hits.1._score: knn_score1 } + - set: { hits.hits.2._score: knn_score2 } + + # Rescored knn + - do: + headers: + Content-Type: application/json + search: + rest_total_hits_as_int: true + index: test + body: + fields: [ "name" ] + knn: + field: vector + query_vector: [-0.5, 90.0, -10, 14.8, -156.0] + k: 3 + num_candidates: 3 + rescore_vector: + num_candidates_factor: 1.5 + + # Compare scores as hit IDs may change depending on how things are distributed + - match: { hits.total: 3 } + - match: { hits.hits.0._score: $knn_score0 } + - match: { hits.hits.1._score: $knn_score1 } + - match: { hits.hits.2._score: $knn_score2 } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/41_knn_search_bbq_hnsw.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/41_knn_search_bbq_hnsw.yml index 188c155e4a836..2567a4ac597d9 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/41_knn_search_bbq_hnsw.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/41_knn_search_bbq_hnsw.yml @@ -11,20 +11,11 @@ setup: number_of_shards: 1 mappings: properties: - name: - type: keyword vector: type: dense_vector dims: 64 index: true - similarity: l2_norm - index_options: - type: bbq_hnsw - another_vector: - type: dense_vector - dims: 64 - index: true - similarity: l2_norm + similarity: max_inner_product index_options: type: bbq_hnsw @@ -33,9 +24,14 @@ setup: index: bbq_hnsw id: "1" body: - name: cow.jpg - vector: [300.33, -34.8988, 15.555, -200.0, 230.0, 300.33, -34.8988, 15.555, -200.0, 230.0, 300.33, -34.8988, 15.555, -200.0, 230.0, 300.33, -34.8988, 15.555, -200.0, 230.0, 300.33, -34.8988, 15.555, -200.0, 230.0, 300.33, -34.8988, 15.555, -200.0, 230.0, 300.33, -34.8988, 15.555, -200.0, 230.0, 300.33, -34.8988, 15.555, -200.0, 230.0, 300.33, -34.8988, 15.555, -200.0, 230.0, 300.33, -34.8988, 15.555, -200.0, 230.0, 300.33, -34.8988, 15.555, -200.0, 230.0, 300.33, -34.8988, 15.555, -200.0, 230.0, 300.33, -34.8988, 15.555, -200.0] - another_vector: [115.0, -1.02, 15.555, -100.0, 130.0, 115.0, -1.02, 15.555, -100.0, 130.0, 115.0, -1.02, 15.555, -100.0, 130.0, 115.0, -1.02, 15.555, -100.0, 130.0, 115.0, -1.02, 15.555, -100.0, 130.0, 115.0, -1.02, 15.555, -100.0, 130.0, 115.0, -1.02, 15.555, -100.0, 130.0, 115.0, -1.02, 
15.555, -100.0, 130.0, 115.0, -1.02, 15.555, -100.0, 130.0, 115.0, -1.02, 15.555, -100.0, 130.0, 115.0, -1.02, 15.555, -100.0, 130.0, 115.0, -1.02, 15.555, -100.0, 130.0, 115.0, -1.02, 15.555, -100.0] + vector: [0.077, 0.32 , -0.205, 0.63 , 0.032, 0.201, 0.167, -0.313, + 0.176, 0.531, -0.375, 0.334, -0.046, 0.078, -0.349, 0.272, + 0.307, -0.083, 0.504, 0.255, -0.404, 0.289, -0.226, -0.132, + -0.216, 0.49 , 0.039, 0.507, -0.307, 0.107, 0.09 , -0.265, + -0.285, 0.336, -0.272, 0.369, -0.282, 0.086, -0.132, 0.475, + -0.224, 0.203, 0.439, 0.064, 0.246, -0.396, 0.297, 0.242, + -0.028, 0.321, -0.022, -0.009, -0.001 , 0.031, -0.533, 0.45, + -0.683, 1.331, 0.194, -0.157, -0.1 , -0.279, -0.098, -0.176] # Flush in order to provoke a merge later - do: indices.flush: @@ -46,9 +42,14 @@ setup: index: bbq_hnsw id: "2" body: - name: moose.jpg - vector: [100.0, -13, 14.8, -156.0, -0.5, 100.0, -13, 14.8, -156.0, -0.5, 100.0, -13, 14.8, -156.0, -0.5, 100.0, -13, 14.8, -156.0, -0.5, 100.0, -13, 14.8, -156.0, -0.5, 100.0, -13, 14.8, -156.0, -0.5, 100.0, -13, 14.8, -156.0, -0.5, 100.0, -13, 14.8, -156.0, -0.5, 100.0, -13, 14.8, -156.0, -0.5, 100.0, -13, 14.8, -156.0, -0.5, 100.0, -13, 14.8, -156.0, -0.5, 100.0, -13, 14.8, -156.0, -0.5, 100.0, -13, 14.8, -156.0] - another_vector: [50.0, -1, 1, 120, -0.5, 50.0, -1, 1, 120, -0.5, 50.0, -1, 1, 120, -0.5, 50.0, -1, 1, 120, -0.5, 50.0, -1, 1, 120, -0.5, 50.0, -1, 1, 120, -0.5, 50.0, -1, 1, 120, -0.5, 50.0, -1, 1, 120, -0.5, 50.0, -1, 1, 120, -0.5, 50.0, -1, 1, 120, -0.5, 50.0, -1, 1, 120, -0.5, 50.0, -1, 1, 120, -0.5, 50.0, -1, 1, 120] + vector: [0.196, 0.514, 0.039, 0.555, -0.042, 0.242, 0.463, -0.348, + -0.08 , 0.442, -0.067, -0.05 , -0.001, 0.298, -0.377, 0.048, + 0.307, 0.159, 0.278, 0.119, -0.057, 0.333, -0.289, -0.438, + -0.014, 0.361, -0.169, 0.292, -0.229, 0.123, 0.031, -0.138, + -0.139, 0.315, -0.216, 0.322, -0.445, -0.059, 0.071, 0.429, + -0.602, -0.142, 0.11 , 0.192, 0.259, -0.241, 0.181, -0.166, + 0.082, 0.107, -0.05 , 0.155, 0.011, 0.161, -0.486, 0.569, + -0.489, 0.901, 0.208, 0.011, -0.209, -0.153, -0.27 , -0.013] # Flush in order to provoke a merge later - do: indices.flush: @@ -60,8 +61,14 @@ setup: id: "3" body: name: rabbit.jpg - vector: [111.3, -13.0, 14.8, -156.0, 0.5, 111.3, -13.0, 14.8, -156.0, 0.5, 111.3, -13.0, 14.8, -156.0, 0.5, 111.3, -13.0, 14.8, -156.0, 0.5, 111.3, -13.0, 14.8, -156.0, 0.5, 111.3, -13.0, 14.8, -156.0, 0.5, 111.3, -13.0, 14.8, -156.0, 0.5, 111.3, -13.0, 14.8, -156.0, 0.5, 111.3, -13.0, 14.8, -156.0, 0.5, 111.3, -13.0, 14.8, -156.0, 0.5, 111.3, -13.0, 14.8, -156.0, 0.5, 111.3, -13.0, 14.8, -156.0, 0.5, 111.3, -13.0, 14.8, -156.0] - another_vector: [11.0, 0, 12, 111.0, -0.5, 11.0, 0, 12, 111.0, -0.5, 11.0, 0, 12, 111.0, -0.5, 11.0, 0, 12, 111.0, -0.5, 11.0, 0, 12, 111.0, -0.5, 11.0, 0, 12, 111.0, -0.5, 11.0, 0, 12, 111.0, -0.5, 11.0, 0, 12, 111.0, -0.5, 11.0, 0, 12, 111.0, -0.5, 11.0, 0, 12, 111.0, -0.5, 11.0, 0, 12, 111.0, -0.5, 11.0, 0, 12, 111.0, -0.5, 11.0, 0, 12, 111.0] + vector: [0.139, 0.178, -0.117, 0.399, 0.014, -0.139, 0.347, -0.33 , + 0.139, 0.34 , -0.052, -0.052, -0.249, 0.327, -0.288, 0.049, + 0.464, 0.338, 0.516, 0.247, -0.104, 0.259, -0.209, -0.246, + -0.11 , 0.323, 0.091, 0.442, -0.254, 0.195, -0.109, -0.058, + -0.279, 0.402, -0.107, 0.308, -0.273, 0.019, 0.082, 0.399, + -0.658, -0.03 , 0.276, 0.041, 0.187, -0.331, 0.165, 0.017, + 0.171, -0.203, -0.198, 0.115, -0.007, 0.337, -0.444, 0.615, + -0.657, 1.285, 0.2 , -0.062, 0.038, 0.089, -0.068, -0.058] # Flush in order to provoke a merge later - do: 
indices.flush: @@ -73,19 +80,101 @@ setup: max_num_segments: 1 --- "Test knn search": + - requires: + capabilities: + - method: POST + path: /_search + capabilities: [ optimized_scalar_quantization_bbq ] + test_runner_features: capabilities + reason: "BBQ scoring improved and changed with optimized_scalar_quantization_bbq" - do: search: index: bbq_hnsw body: knn: field: vector - query_vector: [ 90.0, -10, 14.8, -156.0, -0.5, 90.0, -10, 14.8, -156.0, -0.5, 90.0, -10, 14.8, -156.0, -0.5, 90.0, -10, 14.8, -156.0, -0.5, 90.0, -10, 14.8, -156.0, -0.5, 90.0, -10, 14.8, -156.0, -0.5, 90.0, -10, 14.8, -156.0, -0.5, 90.0, -10, 14.8, -156.0, -0.5, 90.0, -10, 14.8, -156.0, -0.5, 90.0, -10, 14.8, -156.0, -0.5, 90.0, -10, 14.8, -156.0, -0.5, 90.0, -10, 14.8, -156.0, -0.5, 90.0, -10, 14.8, -156.0] + query_vector: [0.128, 0.067, -0.08 , 0.395, -0.11 , -0.259, 0.473, -0.393, + 0.292, 0.571, -0.491, 0.444, -0.288, 0.198, -0.343, 0.015, + 0.232, 0.088, 0.228, 0.151, -0.136, 0.236, -0.273, -0.259, + -0.217, 0.359, -0.207, 0.352, -0.142, 0.192, -0.061, -0.17 , + -0.343, 0.189, -0.221, 0.32 , -0.301, -0.1 , 0.005, 0.232, + -0.344, 0.136, 0.252, 0.157, -0.13 , -0.244, 0.193, -0.034, + -0.12 , -0.193, -0.102, 0.252, -0.185, -0.167, -0.575, 0.582, + -0.426, 0.983, 0.212, 0.204, 0.03 , -0.276, -0.425, -0.158] k: 3 num_candidates: 3 - # Depending on how things are distributed, docs 2 and 3 might be swapped - # here we verify that are last hit is always the worst one - - match: { hits.hits.2._id: "1" } + - match: { hits.hits.0._id: "1" } + - match: { hits.hits.1._id: "3" } + - match: { hits.hits.2._id: "2" } +--- +"Vector rescoring has same scoring as exact search for kNN section": + - requires: + reason: 'Quantized vector rescoring is required' + test_runner_features: [capabilities] + capabilities: + - method: GET + path: /_search + capabilities: [knn_quantized_vector_rescore] + - skip: + features: "headers" + + # Rescore + - do: + headers: + Content-Type: application/json + search: + rest_total_hits_as_int: true + index: bbq_hnsw + body: + knn: + field: vector + query_vector: [0.128, 0.067, -0.08 , 0.395, -0.11 , -0.259, 0.473, -0.393, + 0.292, 0.571, -0.491, 0.444, -0.288, 0.198, -0.343, 0.015, + 0.232, 0.088, 0.228, 0.151, -0.136, 0.236, -0.273, -0.259, + -0.217, 0.359, -0.207, 0.352, -0.142, 0.192, -0.061, -0.17 , + -0.343, 0.189, -0.221, 0.32 , -0.301, -0.1 , 0.005, 0.232, + -0.344, 0.136, 0.252, 0.157, -0.13 , -0.244, 0.193, -0.034, + -0.12 , -0.193, -0.102, 0.252, -0.185, -0.167, -0.575, 0.582, + -0.426, 0.983, 0.212, 0.204, 0.03 , -0.276, -0.425, -0.158] + k: 3 + num_candidates: 3 + rescore_vector: + num_candidates_factor: 1.5 + + # Get rescoring scores - hit ordering may change depending on how things are distributed + - match: { hits.total: 3 } + - set: { hits.hits.0._score: rescore_score0 } + - set: { hits.hits.1._score: rescore_score1 } + - set: { hits.hits.2._score: rescore_score2 } + + # Exact knn via script score + - do: + headers: + Content-Type: application/json + search: + rest_total_hits_as_int: true + body: + query: + script_score: + query: {match_all: {} } + script: + source: "double similarity = dotProduct(params.query_vector, 'vector'); return similarity < 0 ? 
1 / (1 + -1 * similarity) : similarity + 1" + params: + query_vector: [0.128, 0.067, -0.08 , 0.395, -0.11 , -0.259, 0.473, -0.393, + 0.292, 0.571, -0.491, 0.444, -0.288, 0.198, -0.343, 0.015, + 0.232, 0.088, 0.228, 0.151, -0.136, 0.236, -0.273, -0.259, + -0.217, 0.359, -0.207, 0.352, -0.142, 0.192, -0.061, -0.17 , + -0.343, 0.189, -0.221, 0.32 , -0.301, -0.1 , 0.005, 0.232, + -0.344, 0.136, 0.252, 0.157, -0.13 , -0.244, 0.193, -0.034, + -0.12 , -0.193, -0.102, 0.252, -0.185, -0.167, -0.575, 0.582, + -0.426, 0.983, 0.212, 0.204, 0.03 , -0.276, -0.425, -0.158] + + # Compare scores as hit IDs may change depending on how things are distributed + - match: { hits.total: 3 } + - match: { hits.hits.0._score: $rescore_score0 } + - match: { hits.hits.1._score: $rescore_score1 } + - match: { hits.hits.2._score: $rescore_score2 } --- "Test bad quantization parameters": diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/41_knn_search_byte_quantized.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/41_knn_search_byte_quantized.yml index b7a5517309949..b1e35789e8737 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/41_knn_search_byte_quantized.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/41_knn_search_byte_quantized.yml @@ -368,6 +368,65 @@ setup: - match: {hits.hits.2._id: "1"} - gte: {hits.hits.2._score: 0.78} - lte: {hits.hits.2._score: 0.791} + +--- +# Won't be true for larger datasets, but this helps checking kNN vs rescoring vs exact search +"Vector rescoring has the same scoring as exact search for kNN section": + - requires: + reason: 'Quantized vector rescoring is required' + test_runner_features: [capabilities] + capabilities: + - method: GET + path: /_search + capabilities: [knn_quantized_vector_rescore] + - skip: + features: "headers" + + # Rescore + - do: + headers: + Content-Type: application/json + search: + rest_total_hits_as_int: true + index: hnsw_byte_quantized + body: + size: 3 + query: + knn: + k: 3 + num_candidates: 3 + field: vector + query_vector: [0.5, 111.3, -13.0, 14.8, -156.0] + rescore_vector: + num_candidates_factor: 1.5 + + # Get rescoring scores - hit ordering may change depending on how things are distributed + - match: { hits.total: 3 } + - set: { hits.hits.0._score: rescore_score0 } + - set: { hits.hits.1._score: rescore_score1 } + - set: { hits.hits.2._score: rescore_score2 } + + # Exact knn via script score + - do: + headers: + Content-Type: application/json + search: + rest_total_hits_as_int: true + body: + query: + script_score: + query: {match_all: {} } + script: + source: "1.0 / (1.0 + Math.pow(l2norm(params.query_vector, 'vector'), 2.0))" + params: + query_vector: [0.5, 111.3, -13.0, 14.8, -156.0] + + # Compare scores as hit IDs may change depending on how things are distributed + - match: { hits.total: 3 } + - match: { hits.hits.0._score: $rescore_score0 } + - match: { hits.hits.1._score: $rescore_score1 } + - match: { hits.hits.2._score: $rescore_score2 } + --- "Test bad quantization parameters": - do: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/41_knn_search_half_byte_quantized.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/41_knn_search_half_byte_quantized.yml index 5f1af2ca5c52f..54e9eadf42e0b 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/41_knn_search_half_byte_quantized.yml +++ 
b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/41_knn_search_half_byte_quantized.yml @@ -549,6 +549,62 @@ setup: - match: { hits.hits.1._id: "2"} - match: { hits.hits.2._id: "3"} --- +"Vector rescoring has the same scoring as exact search for kNN section": + - requires: + reason: 'Quantized vector rescoring is required' + test_runner_features: [capabilities] + capabilities: + - method: GET + path: /_search + capabilities: [knn_quantized_vector_rescore] + - skip: + features: "headers" + + # Rescore + - do: + headers: + Content-Type: application/json + search: + index: hnsw_byte_quantized + rest_total_hits_as_int: true + body: + fields: [ "name" ] + knn: + field: vector + query_vector: [-0.5, 90.0, -10, 14.8] + k: 3 + num_candidates: 3 + rescore_vector: + num_candidates_factor: 1.5 + + # Get rescoring scores - hit ordering may change depending on how things are distributed + - match: { hits.total: 3 } + - set: { hits.hits.0._score: rescore_score0 } + - set: { hits.hits.1._score: rescore_score1 } + - set: { hits.hits.2._score: rescore_score2 } + + # Exact knn via script score + - do: + headers: + Content-Type: application/json + search: + rest_total_hits_as_int: true + body: + query: + script_score: + query: {match_all: {} } + script: + source: "1.0 / (1.0 + Math.pow(l2norm(params.query_vector, 'vector'), 2.0))" + params: + query_vector: [-0.5, 90.0, -10, 14.8] + + # Compare scores as hit IDs may change depending on how things are distributed + - match: { hits.total: 3 } + - match: { hits.hits.0._score: $rescore_score0 } + - match: { hits.hits.1._score: $rescore_score1 } + - match: { hits.hits.2._score: $rescore_score2 } + +--- "Test odd dimensions fail indexing": - do: catch: bad_request diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/42_knn_search_bbq_flat.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/42_knn_search_bbq_flat.yml index ed7a8dd5df65d..a3cd624ef0ab8 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/42_knn_search_bbq_flat.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/42_knn_search_bbq_flat.yml @@ -11,20 +11,11 @@ setup: number_of_shards: 1 mappings: properties: - name: - type: keyword vector: type: dense_vector dims: 64 index: true - similarity: l2_norm - index_options: - type: bbq_flat - another_vector: - type: dense_vector - dims: 64 - index: true - similarity: l2_norm + similarity: max_inner_product index_options: type: bbq_flat @@ -33,9 +24,14 @@ setup: index: bbq_flat id: "1" body: - name: cow.jpg - vector: [300.33, -34.8988, 15.555, -200.0, 230.0, 300.33, -34.8988, 15.555, -200.0, 230.0, 300.33, -34.8988, 15.555, -200.0, 230.0, 300.33, -34.8988, 15.555, -200.0, 230.0, 300.33, -34.8988, 15.555, -200.0, 230.0, 300.33, -34.8988, 15.555, -200.0, 230.0, 300.33, -34.8988, 15.555, -200.0, 230.0, 300.33, -34.8988, 15.555, -200.0, 230.0, 300.33, -34.8988, 15.555, -200.0, 230.0, 300.33, -34.8988, 15.555, -200.0, 230.0, 300.33, -34.8988, 15.555, -200.0, 230.0, 300.33, -34.8988, 15.555, -200.0, 230.0, 300.33, -34.8988, 15.555, -200.0] - another_vector: [115.0, -1.02, 15.555, -100.0, 130.0, 115.0, -1.02, 15.555, -100.0, 130.0, 115.0, -1.02, 15.555, -100.0, 130.0, 115.0, -1.02, 15.555, -100.0, 130.0, 115.0, -1.02, 15.555, -100.0, 130.0, 115.0, -1.02, 15.555, -100.0, 130.0, 115.0, -1.02, 15.555, -100.0, 130.0, 115.0, -1.02, 15.555, -100.0, 130.0, 115.0, -1.02, 15.555, -100.0, 130.0, 115.0, -1.02, 15.555, 
-100.0, 130.0, 115.0, -1.02, 15.555, -100.0, 130.0, 115.0, -1.02, 15.555, -100.0, 130.0, 115.0, -1.02, 15.555, -100.0] + vector: [0.077, 0.32 , -0.205, 0.63 , 0.032, 0.201, 0.167, -0.313, + 0.176, 0.531, -0.375, 0.334, -0.046, 0.078, -0.349, 0.272, + 0.307, -0.083, 0.504, 0.255, -0.404, 0.289, -0.226, -0.132, + -0.216, 0.49 , 0.039, 0.507, -0.307, 0.107, 0.09 , -0.265, + -0.285, 0.336, -0.272, 0.369, -0.282, 0.086, -0.132, 0.475, + -0.224, 0.203, 0.439, 0.064, 0.246, -0.396, 0.297, 0.242, + -0.028, 0.321, -0.022, -0.009, -0.001 , 0.031, -0.533, 0.45, + -0.683, 1.331, 0.194, -0.157, -0.1 , -0.279, -0.098, -0.176] # Flush in order to provoke a merge later - do: indices.flush: @@ -46,9 +42,14 @@ setup: index: bbq_flat id: "2" body: - name: moose.jpg - vector: [100.0, -13, 14.8, -156.0, -0.5, 100.0, -13, 14.8, -156.0, -0.5, 100.0, -13, 14.8, -156.0, -0.5, 100.0, -13, 14.8, -156.0, -0.5, 100.0, -13, 14.8, -156.0, -0.5, 100.0, -13, 14.8, -156.0, -0.5, 100.0, -13, 14.8, -156.0, -0.5, 100.0, -13, 14.8, -156.0, -0.5, 100.0, -13, 14.8, -156.0, -0.5, 100.0, -13, 14.8, -156.0, -0.5, 100.0, -13, 14.8, -156.0, -0.5, 100.0, -13, 14.8, -156.0, -0.5, 100.0, -13, 14.8, -156.0] - another_vector: [50.0, -1, 1, 120, -0.5, 50.0, -1, 1, 120, -0.5, 50.0, -1, 1, 120, -0.5, 50.0, -1, 1, 120, -0.5, 50.0, -1, 1, 120, -0.5, 50.0, -1, 1, 120, -0.5, 50.0, -1, 1, 120, -0.5, 50.0, -1, 1, 120, -0.5, 50.0, -1, 1, 120, -0.5, 50.0, -1, 1, 120, -0.5, 50.0, -1, 1, 120, -0.5, 50.0, -1, 1, 120, -0.5, 50.0, -1, 1, 120] + vector: [0.196, 0.514, 0.039, 0.555, -0.042, 0.242, 0.463, -0.348, + -0.08 , 0.442, -0.067, -0.05 , -0.001, 0.298, -0.377, 0.048, + 0.307, 0.159, 0.278, 0.119, -0.057, 0.333, -0.289, -0.438, + -0.014, 0.361, -0.169, 0.292, -0.229, 0.123, 0.031, -0.138, + -0.139, 0.315, -0.216, 0.322, -0.445, -0.059, 0.071, 0.429, + -0.602, -0.142, 0.11 , 0.192, 0.259, -0.241, 0.181, -0.166, + 0.082, 0.107, -0.05 , 0.155, 0.011, 0.161, -0.486, 0.569, + -0.489, 0.901, 0.208, 0.011, -0.209, -0.153, -0.27 , -0.013] # Flush in order to provoke a merge later - do: indices.flush: @@ -59,9 +60,14 @@ setup: index: bbq_flat id: "3" body: - name: rabbit.jpg - vector: [111.3, -13.0, 14.8, -156.0, 0.5, 111.3, -13.0, 14.8, -156.0, 0.5, 111.3, -13.0, 14.8, -156.0, 0.5, 111.3, -13.0, 14.8, -156.0, 0.5, 111.3, -13.0, 14.8, -156.0, 0.5, 111.3, -13.0, 14.8, -156.0, 0.5, 111.3, -13.0, 14.8, -156.0, 0.5, 111.3, -13.0, 14.8, -156.0, 0.5, 111.3, -13.0, 14.8, -156.0, 0.5, 111.3, -13.0, 14.8, -156.0, 0.5, 111.3, -13.0, 14.8, -156.0, 0.5, 111.3, -13.0, 14.8, -156.0, 0.5, 111.3, -13.0, 14.8, -156.0] - another_vector: [11.0, 0, 12, 111.0, -0.5, 11.0, 0, 12, 111.0, -0.5, 11.0, 0, 12, 111.0, -0.5, 11.0, 0, 12, 111.0, -0.5, 11.0, 0, 12, 111.0, -0.5, 11.0, 0, 12, 111.0, -0.5, 11.0, 0, 12, 111.0, -0.5, 11.0, 0, 12, 111.0, -0.5, 11.0, 0, 12, 111.0, -0.5, 11.0, 0, 12, 111.0, -0.5, 11.0, 0, 12, 111.0, -0.5, 11.0, 0, 12, 111.0, -0.5, 11.0, 0, 12, 111.0] + vector: [0.139, 0.178, -0.117, 0.399, 0.014, -0.139, 0.347, -0.33 , + 0.139, 0.34 , -0.052, -0.052, -0.249, 0.327, -0.288, 0.049, + 0.464, 0.338, 0.516, 0.247, -0.104, 0.259, -0.209, -0.246, + -0.11 , 0.323, 0.091, 0.442, -0.254, 0.195, -0.109, -0.058, + -0.279, 0.402, -0.107, 0.308, -0.273, 0.019, 0.082, 0.399, + -0.658, -0.03 , 0.276, 0.041, 0.187, -0.331, 0.165, 0.017, + 0.171, -0.203, -0.198, 0.115, -0.007, 0.337, -0.444, 0.615, + -0.657, 1.285, 0.2 , -0.062, 0.038, 0.089, -0.068, -0.058] # Flush in order to provoke a merge later - do: indices.flush: @@ -73,19 +79,102 @@ setup: max_num_segments: 1 --- "Test knn 
search": + - requires: + capabilities: + - method: POST + path: /_search + capabilities: [ optimized_scalar_quantization_bbq ] + test_runner_features: capabilities + reason: "BBQ scoring improved and changed with optimized_scalar_quantization_bbq" + - do: + search: + index: bbq_flat + body: + knn: + field: vector + query_vector: [0.128, 0.067, -0.08 , 0.395, -0.11 , -0.259, 0.473, -0.393, + 0.292, 0.571, -0.491, 0.444, -0.288, 0.198, -0.343, 0.015, + 0.232, 0.088, 0.228, 0.151, -0.136, 0.236, -0.273, -0.259, + -0.217, 0.359, -0.207, 0.352, -0.142, 0.192, -0.061, -0.17 , + -0.343, 0.189, -0.221, 0.32 , -0.301, -0.1 , 0.005, 0.232, + -0.344, 0.136, 0.252, 0.157, -0.13 , -0.244, 0.193, -0.034, + -0.12 , -0.193, -0.102, 0.252, -0.185, -0.167, -0.575, 0.582, + -0.426, 0.983, 0.212, 0.204, 0.03 , -0.276, -0.425, -0.158] + k: 3 + num_candidates: 3 + + - match: { hits.hits.0._id: "1" } + - match: { hits.hits.1._id: "3" } + - match: { hits.hits.2._id: "2" } +--- +"Vector rescoring has same scoring as exact search for kNN section": + - requires: + reason: 'Quantized vector rescoring is required' + test_runner_features: [capabilities] + capabilities: + - method: GET + path: /_search + capabilities: [knn_quantized_vector_rescore] + - skip: + features: "headers" + + # Rescore - do: + headers: + Content-Type: application/json search: + rest_total_hits_as_int: true index: bbq_flat body: knn: field: vector - query_vector: [ 90.0, -10, 14.8, -156.0, -0.5, 90.0, -10, 14.8, -156.0, -0.5, 90.0, -10, 14.8, -156.0, -0.5, 90.0, -10, 14.8, -156.0, -0.5, 90.0, -10, 14.8, -156.0, -0.5, 90.0, -10, 14.8, -156.0, -0.5, 90.0, -10, 14.8, -156.0, -0.5, 90.0, -10, 14.8, -156.0, -0.5, 90.0, -10, 14.8, -156.0, -0.5, 90.0, -10, 14.8, -156.0, -0.5, 90.0, -10, 14.8, -156.0, -0.5, 90.0, -10, 14.8, -156.0, -0.5, 90.0, -10, 14.8, -156.0] + query_vector: [0.128, 0.067, -0.08 , 0.395, -0.11 , -0.259, 0.473, -0.393, + 0.292, 0.571, -0.491, 0.444, -0.288, 0.198, -0.343, 0.015, + 0.232, 0.088, 0.228, 0.151, -0.136, 0.236, -0.273, -0.259, + -0.217, 0.359, -0.207, 0.352, -0.142, 0.192, -0.061, -0.17, + -0.343, 0.189, -0.221, 0.32 , -0.301, -0.1 , 0.005, 0.232, + -0.344, 0.136, 0.252, 0.157, -0.13 , -0.244, 0.193, -0.034, + -0.12 , -0.193, -0.102, 0.252, -0.185, -0.167, -0.575, 0.582, + -0.426, 0.983, 0.212, 0.204, 0.03 , -0.276, -0.425, -0.158] k: 3 num_candidates: 3 + rescore_vector: + num_candidates_factor: 1.5 + + # Get rescoring scores - hit ordering may change depending on how things are distributed + - match: { hits.total: 3 } + - set: { hits.hits.0._score: rescore_score0 } + - set: { hits.hits.1._score: rescore_score1 } + - set: { hits.hits.2._score: rescore_score2 } + + # Exact knn via script score + - do: + headers: + Content-Type: application/json + search: + rest_total_hits_as_int: true + index: bbq_flat + body: + query: + script_score: + query: { match_all: {} } + script: + source: "double similarity = dotProduct(params.query_vector, 'vector'); return similarity < 0 ? 
1 / (1 + -1 * similarity) : similarity + 1" + params: + query_vector: [0.128, 0.067, -0.08 , 0.395, -0.11 , -0.259, 0.473, -0.393, + 0.292, 0.571, -0.491, 0.444, -0.288, 0.198, -0.343, 0.015, + 0.232, 0.088, 0.228, 0.151, -0.136, 0.236, -0.273, -0.259, + -0.217, 0.359, -0.207, 0.352, -0.142, 0.192, -0.061, -0.17, + -0.343, 0.189, -0.221, 0.32 , -0.301, -0.1 , 0.005, 0.232, + -0.344, 0.136, 0.252, 0.157, -0.13 , -0.244, 0.193, -0.034, + -0.12 , -0.193, -0.102, 0.252, -0.185, -0.167, -0.575, 0.582, + -0.426, 0.983, 0.212, 0.204, 0.03 , -0.276, -0.425, -0.158] + # Compare scores as hit IDs may change depending on how things are distributed + - match: { hits.total: 3 } + - match: { hits.hits.0._score: $rescore_score0 } + - match: { hits.hits.1._score: $rescore_score1 } + - match: { hits.hits.2._score: $rescore_score2 } - # Depending on how things are distributed, docs 2 and 3 might be swapped - # here we verify that are last hit is always the worst one - - match: { hits.hits.2._id: "1" } --- "Test bad parameters": - do: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/42_knn_search_flat.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/42_knn_search_flat.yml index 1b439967ba163..a59aedceff3d3 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/42_knn_search_flat.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/42_knn_search_flat.yml @@ -257,6 +257,61 @@ setup: - gte: {hits.hits.2._score: 0.78} - lte: {hits.hits.2._score: 0.791} --- +"Vector rescoring has no effect for non-quantized vectors and provides same results as non-rescored knn": + - requires: + reason: 'Quantized vector rescoring is required' + test_runner_features: [capabilities] + capabilities: + - method: GET + path: /_search + capabilities: [knn_quantized_vector_rescore] + - skip: + features: "headers" + + # Non-rescored knn + - do: + headers: + Content-Type: application/json + search: + rest_total_hits_as_int: true + index: flat + body: + fields: [ "name" ] + knn: + field: vector + query_vector: [-0.5, 90.0, -10, 14.8, -156.0] + k: 3 + num_candidates: 3 + + # Get scores - hit ordering may change depending on how things are distributed + - match: { hits.total: 3 } + - set: { hits.hits.0._score: knn_score0 } + - set: { hits.hits.1._score: knn_score1 } + - set: { hits.hits.2._score: knn_score2 } + + # Rescored knn + - do: + headers: + Content-Type: application/json + search: + rest_total_hits_as_int: true + index: flat + body: + fields: [ "name" ] + knn: + field: vector + query_vector: [-0.5, 90.0, -10, 14.8, -156.0] + k: 3 + num_candidates: 3 + rescore_vector: + num_candidates_factor: 1.5 + + # Compare scores as hit IDs may change depending on how things are distributed + - match: { hits.total: 3 } + - match: { hits.hits.0._score: $knn_score0 } + - match: { hits.hits.1._score: $knn_score1 } + - match: { hits.hits.2._score: $knn_score2 } +--- "Test bad parameters": - do: catch: bad_request diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/42_knn_search_int4_flat.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/42_knn_search_int4_flat.yml index b9a0b16f2bd7a..6796a92122f9a 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/42_knn_search_int4_flat.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/42_knn_search_int4_flat.yml @@ -344,3 +344,58 @@ setup: 
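Note on the exact-search scripts used in the rescore tests above: for l2_norm fields the reference score is 1 / (1 + l2norm(query, vector)^2), and for max_inner_product fields the raw dot product is mapped piecewise onto positive, order-preserving scores. A standalone sketch of both functions, in plain Java for illustration (not Elasticsearch source):

final class ExactVectorScores {
    // l2_norm reference score, mirroring the script
    // "1.0 / (1.0 + Math.pow(l2norm(params.query_vector, 'vector'), 2.0))".
    static double l2NormScore(float[] query, float[] vector) {
        double squaredDistance = 0.0;
        for (int i = 0; i < query.length; i++) {
            double d = query[i] - vector[i];
            squaredDistance += d * d;
        }
        return 1.0 / (1.0 + squaredDistance);
    }

    // max_inner_product reference score, mirroring the script
    // "similarity < 0 ? 1 / (1 + -1 * similarity) : similarity + 1":
    // negative dot products are squashed into (0, 1), positive ones are
    // shifted up by 1, so the result stays positive and order-preserving.
    static double maxInnerProductScore(float[] query, float[] vector) {
        double dotProduct = 0.0;
        for (int i = 0; i < query.length; i++) {
            dotProduct += query[i] * vector[i];
        }
        return dotProduct < 0 ? 1.0 / (1.0 - dotProduct) : dotProduct + 1.0;
    }
}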
index: dynamic_dim_hnsw_quantized body: vector: [1.0, 2.0, 3.0, 4.0, 5.0, 6.0] +--- +"Vector rescoring has the same scoring as exact search for kNN section": + - requires: + reason: 'Quantized vector rescoring is required' + test_runner_features: [capabilities] + capabilities: + - method: GET + path: /_search + capabilities: [knn_quantized_vector_rescore] + - skip: + features: "headers" + + # Rescore + - do: + headers: + Content-Type: application/json + search: + index: int4_flat + rest_total_hits_as_int: true + body: + fields: [ "name" ] + knn: + field: vector + query_vector: [-0.5, 90.0, -10, 14.8] + k: 3 + num_candidates: 3 + rescore_vector: + num_candidates_factor: 1.5 + + # Get rescoring scores - hit ordering may change depending on how things are distributed + - match: { hits.total: 3 } + - set: { hits.hits.0._score: rescore_score0 } + - set: { hits.hits.1._score: rescore_score1 } + - set: { hits.hits.2._score: rescore_score2 } + + # Exact knn via script score + - do: + headers: + Content-Type: application/json + search: + rest_total_hits_as_int: true + body: + query: + script_score: + query: {match_all: {} } + script: + source: "1.0 / (1.0 + Math.pow(l2norm(params.query_vector, 'vector'), 2.0))" + params: + query_vector: [-0.5, 90.0, -10, 14.8] + + # Get rescoring scores - hit ordering may change depending on how things are distributed + - match: { hits.total: 3 } + - match: { hits.hits.0._score: $rescore_score0 } + - match: { hits.hits.1._score: $rescore_score1 } + - match: { hits.hits.2._score: $rescore_score2 } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/42_knn_search_int8_flat.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/42_knn_search_int8_flat.yml index 139747c5e7ee5..d1d312449cb70 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/42_knn_search_int8_flat.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/42_knn_search_int8_flat.yml @@ -262,6 +262,60 @@ setup: - gte: {hits.hits.2._score: 0.78} - lte: {hits.hits.2._score: 0.791} --- +"Vector rescoring has the same scoring as exact search for kNN section": + - requires: + reason: 'Quantized vector rescoring is required' + test_runner_features: [capabilities] + capabilities: + - method: GET + path: /_search + capabilities: [knn_quantized_vector_rescore] + - skip: + features: "headers" + + # Rescore + - do: + headers: + Content-Type: application/json + search: + index: int8_flat + rest_total_hits_as_int: true + body: + fields: [ "name" ] + knn: + field: vector + query_vector: [-0.5, 90.0, -10, 14.8, -156.0] + k: 3 + num_candidates: 3 + rescore_vector: + num_candidates_factor: 1.5 + + # Get rescoring scores - hit ordering may change depending on how things are distributed + - match: { hits.total: 3 } + - set: { hits.hits.0._score: rescore_score0 } + - set: { hits.hits.1._score: rescore_score1 } + - set: { hits.hits.2._score: rescore_score2 } + + - do: + headers: + Content-Type: application/json + search: + rest_total_hits_as_int: true + body: + query: + script_score: + query: {match_all: {} } + script: + source: "1.0 / (1.0 + Math.pow(l2norm(params.query_vector, 'vector'), 2.0))" + params: + query_vector: [-0.5, 90.0, -10, 14.8, -156.0] + + # Get rescoring scores - hit ordering may change depending on how things are distributed + - match: { hits.total: 3 } + - match: { hits.hits.0._score: $rescore_score0 } + - match: { hits.hits.1._score: $rescore_score1 } + - match: { 
hits.hits.2._score: $rescore_score2 } +--- "Test bad parameters": - do: catch: bad_request diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_bit.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_bit.yml index 02576ad1b2b01..effa3fff61525 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_bit.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_bit.yml @@ -405,3 +405,59 @@ setup: - match: {hits.hits.0._id: "1"} - match: {hits.hits.0._source.vector1: [2, -1, 1, 4, -3]} - match: {hits.hits.0._source.vector2: [2, -1, 1, 4, -3]} + +--- +"Vector rescoring has no effect for non-quantized vectors and provides same results as non-rescored knn": + - requires: + reason: 'Quantized vector rescoring is required' + test_runner_features: [capabilities] + capabilities: + - method: GET + path: /_search + capabilities: [knn_quantized_vector_rescore] + - skip: + features: "headers" + + # Non-rescored knn + - do: + headers: + Content-Type: application/json + search: + rest_total_hits_as_int: true + index: test + body: + fields: [ "name" ] + knn: + field: vector + query_vector: [127.0, -128.0, 0.0, 1.0, -1.0] + k: 3 + num_candidates: 3 + + # Get scores - hit ordering may change depending on how things are distributed + - match: { hits.total: 3 } + - set: { hits.hits.0._score: knn_score0 } + - set: { hits.hits.1._score: knn_score1 } + - set: { hits.hits.2._score: knn_score2 } + + # Rescored knn + - do: + headers: + Content-Type: application/json + search: + rest_total_hits_as_int: true + index: test + body: + fields: [ "name" ] + knn: + field: vector + query_vector: [127.0, -128.0, 0.0, 1.0, -1.0] + k: 3 + num_candidates: 3 + rescore_vector: + num_candidates_factor: 1.5 + + # Compare scores as hit IDs may change depending on how things are distributed + - match: { hits.total: 3 } + - match: { hits.hits.0._score: $knn_score0 } + - match: { hits.hits.1._score: $knn_score1 } + - match: { hits.hits.2._score: $knn_score2 } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_bit_flat.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_bit_flat.yml index ec7bde4de8435..cdc1d9c64763e 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_bit_flat.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_bit_flat.yml @@ -221,3 +221,59 @@ setup: similarity: l2_norm index_options: type: int8_hnsw + +--- +"Vector rescoring has no effect for non-quantized vectors and provides same results as non-rescored knn": + - requires: + reason: 'Quantized vector rescoring is required' + test_runner_features: [capabilities] + capabilities: + - method: GET + path: /_search + capabilities: [knn_quantized_vector_rescore] + - skip: + features: "headers" + + # Non-rescored knn + - do: + headers: + Content-Type: application/json + search: + rest_total_hits_as_int: true + index: test + body: + fields: [ "name" ] + knn: + field: vector + query_vector: [127, 127, -128, -128, 127] + k: 3 + num_candidates: 3 + + # Get scores - hit ordering may change depending on how things are distributed + - match: { hits.total: 3 } + - set: { hits.hits.0._score: knn_score0 } + - set: { hits.hits.1._score: knn_score1 } + - set: { hits.hits.2._score: knn_score2 } + + # Rescored knn + - do: + 
headers: + Content-Type: application/json + search: + rest_total_hits_as_int: true + index: test + body: + fields: [ "name" ] + knn: + field: vector + query_vector: [127, 127, -128, -128, 127] + k: 3 + num_candidates: 3 + rescore_vector: + num_candidates_factor: 1.5 + + # Compare scores as hit IDs may change depending on how things are distributed + - match: { hits.total: 3 } + - match: { hits.hits.0._score: $knn_score0 } + - match: { hits.hits.1._score: $knn_score1 } + - match: { hits.hits.2._score: $knn_score2 } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_byte.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_byte.yml index 0cedfaa873095..213b571a0b4be 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_byte.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_byte.yml @@ -254,3 +254,60 @@ setup: filter: {"term": {"name": "cow.jpg"}} - length: {hits.hits: 0} + +--- +"Vector rescoring has no effect for non-quantized vectors and provides same results as non-rescored knn": + - requires: + reason: 'Quantized vector rescoring is required' + test_runner_features: [capabilities] + capabilities: + - method: GET + path: /_search + capabilities: [knn_quantized_vector_rescore] + - skip: + features: "headers" + + # Non-rescored knn + - do: + headers: + Content-Type: application/json + search: + rest_total_hits_as_int: true + index: test + body: + fields: [ "name" ] + knn: + field: vector + query_vector: [127, 127, -128, -128, 127] + k: 3 + num_candidates: 3 + + # Get scores - hit ordering may change depending on how things are distributed + - match: { hits.total: 3 } + - set: { hits.hits.0._score: knn_score0 } + - set: { hits.hits.1._score: knn_score1 } + - set: { hits.hits.2._score: knn_score2 } + + # Rescored knn + - do: + headers: + Content-Type: application/json + search: + rest_total_hits_as_int: true + index: test + body: + fields: [ "name" ] + knn: + field: vector + query_vector: [127, 127, -128, -128, 127] + k: 3 + num_candidates: 3 + rescore_vector: + num_candidates_factor: 1.5 + + # Compare scores as hit IDs may change depending on how things are distributed + - match: { hits.total: 3 } + - match: { hits.hits.0._score: $knn_score0 } + - match: { hits.hits.1._score: $knn_score1 } + - match: { hits.hits.2._score: $knn_score2 } + diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/50_dense_vector_field_usage.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/50_dense_vector_field_usage.yml index db0437637fc20..81e6a9f91c101 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/50_dense_vector_field_usage.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/50_dense_vector_field_usage.yml @@ -55,6 +55,9 @@ setup: reason: 'dense_vector field usage was added in 8.1' test_runner_features: ["allowed_warnings"] - do: + headers: + Content-Type: "application/vnd.elasticsearch+json;compatible-with=8" + Accept: "application/vnd.elasticsearch+json;compatible-with=8" allowed_warnings: - "The kNN search API has been replaced by the `knn` option in the search API." 
knn_search: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/90_sparse_vector.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/90_sparse_vector.yml index 2505e6d7e353b..0b65a69bf500e 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/90_sparse_vector.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/90_sparse_vector.yml @@ -472,3 +472,120 @@ - match: _source.ml.tokens: {} + +--- +"stored sparse_vector": + + - requires: + cluster_features: [ "mapper.sparse_vector.store_support" ] + reason: "sparse_vector supports store parameter" + + - do: + indices.create: + index: test + body: + mappings: + properties: + ml.tokens: + type: sparse_vector + store: true + + - match: { acknowledged: true } + - do: + index: + index: test + id: "1" + body: + ml: + tokens: + running: 2 + good: 3 + run: 5 + race: 7 + for: 9 + + - match: { result: "created" } + + - do: + indices.refresh: { } + + - do: + search: + index: test + body: + fields: [ "ml.tokens" ] + + - length: { hits.hits.0.fields.ml\\.tokens: 1 } + - length: { hits.hits.0.fields.ml\\.tokens.0: 5 } + - match: { hits.hits.0.fields.ml\\.tokens.0.running: 2.0 } + - match: { hits.hits.0.fields.ml\\.tokens.0.good: 3.0 } + - match: { hits.hits.0.fields.ml\\.tokens.0.run: 5.0 } + - match: { hits.hits.0.fields.ml\\.tokens.0.race: 7.0 } + - match: { hits.hits.0.fields.ml\\.tokens.0.for: 9.0 } + +--- +"stored sparse_vector synthetic source": + + - requires: + cluster_features: [ "mapper.source.mode_from_index_setting", "mapper.sparse_vector.store_support" ] + reason: "sparse_vector supports store parameter" + + - do: + indices.create: + index: test + body: + settings: + index: + mapping.source.mode: synthetic + mappings: + properties: + ml.tokens: + type: sparse_vector + store: true + + - match: { acknowledged: true } + + - do: + index: + index: test + id: "1" + body: + ml: + tokens: + running: 2 + good: 3 + run: 5 + race: 7 + for: 9 + + - match: { result: "created" } + + - do: + indices.refresh: { } + + - do: + search: + index: test + body: + fields: [ "ml.tokens" ] + + - match: + hits.hits.0._source: { + ml: { + tokens: { + running: 2.0, + good: 3.0, + run: 5.0, + race: 7.0, + for: 9.0 + } + } + } + + - length: { hits.hits.0.fields.ml\\.tokens: 1 } + - length: { hits.hits.0.fields.ml\\.tokens.0: 5 } + - match: { hits.hits.0.fields.ml\\.tokens.0.running: 2.0 } + - match: { hits.hits.0.fields.ml\\.tokens.0.good: 3.0 } + - match: { hits.hits.0.fields.ml\\.tokens.0.run: 5.0 } + - match: { hits.hits.0.fields.ml\\.tokens.0.race: 7.0 } + - match: { hits.hits.0.fields.ml\\.tokens.0.for: 9.0 } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/remote/RemoteInfoIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/remote/RemoteInfoIT.java index 25678939cb375..9e578faaac70c 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/remote/RemoteInfoIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/remote/RemoteInfoIT.java @@ -15,7 +15,6 @@ import org.elasticsearch.test.InternalTestCluster; import org.elasticsearch.test.NodeRoles; -import java.util.Collection; import java.util.HashSet; import java.util.List; import java.util.Set; @@ -24,7 +23,7 @@ public class RemoteInfoIT extends AbstractMultiClustersTestCase { @Override - protected Collection<String> remoteClusterAlias() { + protected List<String> 
remoteClusterAlias() { if (randomBoolean()) { return List.of(); } else { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsRemoteIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsRemoteIT.java index 6cc9824245247..5f4315abff405 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsRemoteIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsRemoteIT.java @@ -23,7 +23,6 @@ import org.elasticsearch.test.InternalTestCluster; import org.junit.Assert; -import java.util.Collection; import java.util.List; import java.util.Map; import java.util.concurrent.ExecutionException; @@ -51,7 +50,7 @@ protected boolean reuseClusters() { } @Override - protected Collection<String> remoteClusterAlias() { + protected List<String> remoteClusterAlias() { return List.of(REMOTE1, REMOTE2); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CloneIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CloneIndexIT.java index b6930d06c11ec..47f96aebacd7d 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CloneIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CloneIndexIT.java @@ -15,6 +15,7 @@ import org.elasticsearch.common.ValidationException; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.query.TermsQueryBuilder; import org.elasticsearch.index.seqno.SeqNoStats; import org.elasticsearch.test.ESIntegTestCase; @@ -26,6 +27,7 @@ import static org.elasticsearch.action.admin.indices.create.ShrinkIndexIT.assertNoResizeSourceIndexSettings; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -143,6 +145,51 @@ public void testResizeChangeSyntheticSource() { assertThat(error.getMessage(), containsString("can't change setting [index.mapping.source.mode] during resize")); } + public void testResizeChangeRecoveryUseSyntheticSource() { + prepareCreate("source").setSettings( + indexSettings(between(1, 5), 0).put("index.mode", "logsdb") + .put( + "index.version.created", + IndexVersionUtils.randomVersionBetween( + random(), + IndexVersions.USE_SYNTHETIC_SOURCE_FOR_RECOVERY, + IndexVersion.current() + ) + ) + ).setMapping("@timestamp", "type=date", "host.name", "type=keyword").get(); + updateIndexSettings(Settings.builder().put("index.blocks.write", true), "source"); + IllegalArgumentException error = expectThrows(IllegalArgumentException.class, () -> { + indicesAdmin().prepareResizeIndex("source", "target") + .setResizeType(ResizeType.CLONE) + .setSettings( + Settings.builder() + .put( + "index.version.created", + IndexVersionUtils.randomVersionBetween( + random(), + IndexVersions.USE_SYNTHETIC_SOURCE_FOR_RECOVERY, + IndexVersion.current() + ) + ) + .put("index.recovery.use_synthetic_source", true) + .put("index.mode", "logsdb") + .putNull("index.blocks.write") + .build() + ) + .get(); + }); + // The index.recovery.use_synthetic_source setting requires either index.mode or 
index.mapping.source.mode + // to be present in the settings. Since these are all unmodifiable settings with a non-deterministic evaluation + // order, any of them may trigger a failure first. + assertThat( + error.getMessage(), + anyOf( + containsString("can't change setting [index.mode] during resize"), + containsString("can't change setting [index.recovery.use_synthetic_source] during resize") + ) + ); + } + public void testResizeChangeIndexSorts() { prepareCreate("source").setSettings(indexSettings(between(1, 5), 0)) .setMapping("@timestamp", "type=date", "host.name", "type=keyword") diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/search/CCSPointInTimeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/search/CCSPointInTimeIT.java index ed92e7704f4ba..7a75313d44189 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/search/CCSPointInTimeIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/search/CCSPointInTimeIT.java @@ -44,7 +44,7 @@ public class CCSPointInTimeIT extends AbstractMultiClustersTestCase { public static final String REMOTE_CLUSTER = "remote_cluster"; @Override - protected Collection<String> remoteClusterAlias() { + protected List<String> remoteClusterAlias() { return List.of(REMOTE_CLUSTER); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/NoMasterNodeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/NoMasterNodeIT.java index 13515d34ec65f..545b38f30ba94 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/NoMasterNodeIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/NoMasterNodeIT.java @@ -261,10 +261,11 @@ public void testNoMasterActionsWriteMasterBlock() throws Exception { GetResponse getResponse = clientToMasterlessNode.prepareGet("test1", "1").get(); assertExists(getResponse); - assertHitCount(clientToMasterlessNode.prepareSearch("test1").setAllowPartialSearchResults(true).setSize(0), 1L); - - logger.info("--> here 3"); - assertHitCount(clientToMasterlessNode.prepareSearch("test1").setAllowPartialSearchResults(true), 1L); + assertHitCount( + 1L, + clientToMasterlessNode.prepareSearch("test1").setAllowPartialSearchResults(true).setSize(0), + clientToMasterlessNode.prepareSearch("test1").setAllowPartialSearchResults(true) + ); assertResponse(clientToMasterlessNode.prepareSearch("test2").setAllowPartialSearchResults(true).setSize(0), countResponse -> { assertThat(countResponse.getTotalShards(), equalTo(3)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/RetentionLeaseIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/RetentionLeaseIT.java index 1c5d67d1fa40a..70689dc689673 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/RetentionLeaseIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/RetentionLeaseIT.java @@ -336,7 +336,7 @@ public void testRetentionLeasesSyncOnRecovery() throws Exception { .getShardOrNull(new ShardId(resolveIndex("index"), 0)); final int length = randomIntBetween(1, 8); final Map<String, RetentionLease> currentRetentionLeases = new LinkedHashMap<>(); - logger.info("adding retention [{}}] leases", length); + logger.info("adding retention [{}] leases", length); for (int i = 0; i < length; i++) { final String id = randomValueOtherThanMany(currentRetentionLeases.keySet()::contains, () -> randomAlphaOfLength(8)); final long retainingSequenceNumber = randomLongBetween(0, Long.MAX_VALUE); diff --git 
a/server/src/internalClusterTest/java/org/elasticsearch/index/shard/IndexShardIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/shard/IndexShardIT.java index 6ffd5808cea73..870947db5bd85 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/shard/IndexShardIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/shard/IndexShardIT.java @@ -715,7 +715,15 @@ public void testShardChangesWithDefaultDocType() throws Exception { } IndexShard shard = indexService.getShard(0); try ( - Translog.Snapshot luceneSnapshot = shard.newChangesSnapshot("test", 0, numOps - 1, true, randomBoolean(), randomBoolean()); + Translog.Snapshot luceneSnapshot = shard.newChangesSnapshot( + "test", + 0, + numOps - 1, + true, + randomBoolean(), + randomBoolean(), + randomLongBetween(1, ByteSizeValue.ofMb(32).getBytes()) + ); Translog.Snapshot translogSnapshot = getTranslog(shard).newSnapshot() ) { List<Translog.Operation> opsFromLucene = TestTranslog.drainSnapshot(luceneSnapshot, true); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java index f41277c5b80ca..545ed83bb79c8 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java @@ -398,8 +398,11 @@ public void testWildcardBehaviourSnapshotRestore() throws Exception { public void testAllMissingLenient() throws Exception { createIndex("test1"); prepareIndex("test1").setId("1").setSource("k", "v").setRefreshPolicy(IMMEDIATE).get(); - assertHitCount(prepareSearch("test2").setIndicesOptions(IndicesOptions.lenientExpandOpen()).setQuery(matchAllQuery()), 0L); - assertHitCount(prepareSearch("test2", "test3").setQuery(matchAllQuery()).setIndicesOptions(IndicesOptions.lenientExpandOpen()), 0L); + assertHitCount( + 0L, + prepareSearch("test2").setIndicesOptions(IndicesOptions.lenientExpandOpen()).setQuery(matchAllQuery()), + prepareSearch("test2", "test3").setQuery(matchAllQuery()).setIndicesOptions(IndicesOptions.lenientExpandOpen()) + ); // you should still be able to run empty searches without things blowing up assertHitCount(prepareSearch().setIndicesOptions(IndicesOptions.lenientExpandOpen()).setQuery(matchAllQuery()), 1L); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/cluster/ResolveClusterIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/cluster/ResolveClusterIT.java index 1a6674edc5147..4bdc5d63f4a2f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/cluster/ResolveClusterIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/cluster/ResolveClusterIT.java @@ -28,7 +28,6 @@ import org.elasticsearch.transport.RemoteClusterAware; import java.io.IOException; -import java.util.Collection; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -54,7 +53,7 @@ public class ResolveClusterIT extends AbstractMultiClustersTestCase { private static long LATEST_TIMESTAMP = 1691348820000L; @Override - protected Collection<String> remoteClusterAlias() { + protected List<String> remoteClusterAlias() { return List.of(REMOTE_CLUSTER_1, REMOTE_CLUSTER_2); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexPrimaryRelocationIT.java 
b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexPrimaryRelocationIT.java index 581145d949cf9..debcf5c06a7d6 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexPrimaryRelocationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexPrimaryRelocationIT.java @@ -98,11 +98,11 @@ public void run() { finished.set(true); indexingThread.join(); refresh("test"); - ElasticsearchAssertions.assertHitCount(prepareSearch("test").setTrackTotalHits(true), numAutoGenDocs.get()); ElasticsearchAssertions.assertHitCount( + numAutoGenDocs.get(), + prepareSearch("test").setTrackTotalHits(true), prepareSearch("test").setTrackTotalHits(true)// extra paranoia ;) - .setQuery(QueryBuilders.termQuery("auto", true)), - numAutoGenDocs.get() + .setQuery(QueryBuilders.termQuery("auto", true)) ); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java index ae93ba4a9d3bd..7d4269550bb88 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java @@ -156,7 +156,6 @@ import static org.elasticsearch.index.seqno.SequenceNumbers.NO_OPS_PERFORMED; import static org.elasticsearch.indices.IndexingMemoryController.SHARD_INACTIVE_TIME_SETTING; import static org.elasticsearch.node.NodeRoleSettings.NODE_ROLES_SETTING; -import static org.elasticsearch.node.RecoverySettingsChunkSizePlugin.CHUNK_SIZE_SETTING; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.hamcrest.Matchers.empty; @@ -257,7 +256,7 @@ private void assertOnGoingRecoveryState( public Settings.Builder createRecoverySettingsChunkPerSecond(long chunkSizeBytes) { return Settings.builder() // Set the chunk size in bytes - .put(CHUNK_SIZE_SETTING.getKey(), new ByteSizeValue(chunkSizeBytes, ByteSizeUnit.BYTES)) + .put(RecoverySettings.INDICES_RECOVERY_CHUNK_SIZE.getKey(), new ByteSizeValue(chunkSizeBytes, ByteSizeUnit.BYTES)) // Set one chunk of bytes per second. .put(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), chunkSizeBytes, ByteSizeUnit.BYTES); } @@ -280,7 +279,7 @@ private void unthrottleRecovery() { Settings.builder() // 200mb is an arbitrary number intended to be large enough to avoid more throttling. 
.put(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), "200mb") - .put(CHUNK_SIZE_SETTING.getKey(), RecoverySettings.DEFAULT_CHUNK_SIZE) + .put(RecoverySettings.INDICES_RECOVERY_CHUNK_SIZE.getKey(), RecoverySettings.DEFAULT_CHUNK_SIZE) ); } @@ -1569,18 +1568,15 @@ public static final class TestAnalysisPlugin extends Plugin implements AnalysisP @Override public Map<String, AnalysisProvider<TokenFilterFactory>> getTokenFilters() { - return singletonMap( - "test_token_filter", - (indexSettings, environment, name, settings) -> new AbstractTokenFilterFactory(name, settings) { - @Override - public TokenStream create(TokenStream tokenStream) { - if (throwParsingError.get()) { - throw new MapperParsingException("simulate mapping parsing error"); - } - return tokenStream; + return singletonMap("test_token_filter", (indexSettings, environment, name, settings) -> new AbstractTokenFilterFactory(name) { + @Override + public TokenStream create(TokenStream tokenStream) { + if (throwParsingError.get()) { + throw new MapperParsingException("simulate mapping parsing error"); } + return tokenStream; } - ); + }); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java index de9e3f28a2109..8496180e85d4e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java @@ -500,9 +500,7 @@ public void testIndexTemplateWithAliases() throws Exception { refresh(); - assertHitCount(prepareSearch("test_index"), 5L); - assertHitCount(prepareSearch("simple_alias"), 5L); - assertHitCount(prepareSearch("templated_alias-test_index"), 5L); + assertHitCount(5L, prepareSearch("test_index"), prepareSearch("simple_alias"), prepareSearch("templated_alias-test_index")); assertResponse(prepareSearch("filtered_alias"), response -> { assertHitCount(response, 1L); @@ -584,8 +582,7 @@ public void testIndexTemplateWithAliasesSource() { prepareIndex("test_index").setId("2").setSource("field", "value2").get(); refresh(); - assertHitCount(prepareSearch("test_index"), 2L); - assertHitCount(prepareSearch("alias1"), 2L); + assertHitCount(2L, prepareSearch("test_index"), prepareSearch("alias1")); assertResponse(prepareSearch("alias2"), response -> { assertHitCount(response, 1L); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java index 38eef4f720623..ca2ff69ac9b17 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java @@ -24,7 +24,7 @@ import org.elasticsearch.indices.recovery.PeerRecoveryTargetService; import org.elasticsearch.indices.recovery.RecoveryFileChunkRequest; import org.elasticsearch.indices.recovery.RecoveryFilesInfoRequest; -import org.elasticsearch.node.RecoverySettingsChunkSizePlugin; +import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.transport.MockTransportService; @@ -41,7 +41,6 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Function; -import static 
org.elasticsearch.node.RecoverySettingsChunkSizePlugin.CHUNK_SIZE_SETTING; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -52,7 +51,7 @@ public class TruncatedRecoveryIT extends ESIntegTestCase { @Override protected Collection<Class<? extends Plugin>> nodePlugins() { - return Arrays.asList(MockTransportService.TestPlugin.class, RecoverySettingsChunkSizePlugin.class); + return Arrays.asList(MockTransportService.TestPlugin.class); } /** @@ -63,7 +62,11 @@ protected Collection<Class<? extends Plugin>> nodePlugins() { */ public void testCancelRecoveryAndResume() throws Exception { updateClusterSettings( - Settings.builder().put(CHUNK_SIZE_SETTING.getKey(), new ByteSizeValue(randomIntBetween(50, 300), ByteSizeUnit.BYTES)) + Settings.builder() + .put( + RecoverySettings.INDICES_RECOVERY_CHUNK_SIZE.getKey(), + new ByteSizeValue(randomIntBetween(50, 300), ByteSizeUnit.BYTES) + ) ); NodesStatsResponse nodeStats = clusterAdmin().prepareNodesStats().get(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryOperationPurposeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryOperationPurposeIT.java index c0a2c83f7fe1e..b2e02b2f4c271 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryOperationPurposeIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryOperationPurposeIT.java @@ -36,7 +36,6 @@ import java.io.InputStream; import java.io.OutputStream; import java.util.Collection; -import java.util.Iterator; import java.util.Map; import java.util.concurrent.TimeUnit; @@ -136,11 +135,6 @@ public BlobContainer blobContainer(BlobPath path) { return new AssertingBlobContainer(delegateBlobStore.blobContainer(path)); } - @Override - public void deleteBlobsIgnoringIfNotExists(OperationPurpose purpose, Iterator<String> blobNames) throws IOException { - delegateBlobStore.deleteBlobsIgnoringIfNotExists(purpose, blobNames); - } - @Override public void close() throws IOException { delegateBlobStore.close(); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CCSCanMatchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CCSCanMatchIT.java index 3f354baace85a..ce898d9be15ca 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CCSCanMatchIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CCSCanMatchIT.java @@ -55,7 +55,7 @@ public class CCSCanMatchIT extends AbstractMultiClustersTestCase { static final String REMOTE_CLUSTER = "cluster_a"; @Override - protected Collection<String> remoteClusterAlias() { + protected List<String> remoteClusterAlias() { return List.of("cluster_a"); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CCSUsageTelemetryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CCSUsageTelemetryIT.java index c9d34dbf14015..9c1daccd2cc9e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CCSUsageTelemetryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CCSUsageTelemetryIT.java @@ -11,16 +11,19 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionListener; import 
org.elasticsearch.action.admin.cluster.stats.CCSTelemetrySnapshot; import org.elasticsearch.action.admin.cluster.stats.CCSUsageTelemetry.Result; +import org.elasticsearch.action.bulk.BulkRequestBuilder; import org.elasticsearch.action.search.ClosePointInTimeRequest; import org.elasticsearch.action.search.OpenPointInTimeRequest; import org.elasticsearch.action.search.SearchRequest; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.TransportClosePointInTimeAction; import org.elasticsearch.action.search.TransportOpenPointInTimeAction; import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.RefCountingListener; +import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.client.internal.Client; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; @@ -78,7 +81,7 @@ protected boolean reuseClusters() { } @Override - protected Collection<String> remoteClusterAlias() { + protected List<String> remoteClusterAlias() { return List.of(REMOTE1, REMOTE2); } @@ -126,12 +129,9 @@ private CCSTelemetrySnapshot getTelemetryFromFailedSearch(SearchRequest searchRe // We want to send search to a specific node (we don't care which one) so that we could // collect the CCS telemetry from it later String nodeName = cluster(LOCAL_CLUSTER).getRandomNodeName(); - PlainActionFuture<SearchResponse> queryFuture = new PlainActionFuture<>(); - cluster(LOCAL_CLUSTER).client(nodeName).search(searchRequest, queryFuture); - assertBusy(() -> assertTrue(queryFuture.isDone())); // We expect failure, but we don't care too much which failure it is in this test - ExecutionException ee = expectThrows(ExecutionException.class, queryFuture::get); + ExecutionException ee = expectThrows(ExecutionException.class, cluster(LOCAL_CLUSTER).client(nodeName).search(searchRequest)::get); assertNotNull(ee.getCause()); return getTelemetrySnapshot(nodeName); @@ -637,56 +637,62 @@ private CCSTelemetrySnapshot getTelemetrySnapshot(String nodeName) { return usage.getCcsUsageHolder().getCCSTelemetrySnapshot(); } - private Map<String, Object> setupClusters() { + private Map<String, Object> setupClusters() throws ExecutionException, InterruptedException { String localIndex = "demo"; + String remoteIndex = "prod"; int numShardsLocal = randomIntBetween(2, 10); Settings localSettings = indexSettings(numShardsLocal, randomIntBetween(0, 1)).build(); - assertAcked( + final PlainActionFuture<Void> future = new PlainActionFuture<>(); + try (RefCountingListener refCountingListener = new RefCountingListener(future)) { client(LOCAL_CLUSTER).admin() .indices() .prepareCreate(localIndex) .setSettings(localSettings) .setMapping("@timestamp", "type=date", "f", "type=text") - ); - indexDocs(client(LOCAL_CLUSTER), localIndex); - - String remoteIndex = "prod"; - int numShardsRemote = randomIntBetween(2, 10); - for (String clusterAlias : remoteClusterAlias()) { - final InternalTestCluster remoteCluster = cluster(clusterAlias); - remoteCluster.ensureAtLeastNumDataNodes(randomIntBetween(2, 3)); - assertAcked( + .execute(refCountingListener.acquire(r -> { + assertAcked(r); + indexDocs(client(LOCAL_CLUSTER), localIndex, refCountingListener.acquire()); + })); + + int numShardsRemote = randomIntBetween(2, 10); + var remotes = remoteClusterAlias(); + runInParallel(remotes.size(), i -> { + final String clusterAlias = remotes.get(i); + final InternalTestCluster remoteCluster = cluster(clusterAlias); + 
remoteCluster.ensureAtLeastNumDataNodes(randomIntBetween(2, 3)); client(clusterAlias).admin() .indices() .prepareCreate(remoteIndex) .setSettings(indexSettings(numShardsRemote, randomIntBetween(0, 1))) .setMapping("@timestamp", "type=date", "f", "type=text") - ); - assertFalse( - client(clusterAlias).admin() - .cluster() - .prepareHealth(TEST_REQUEST_TIMEOUT, remoteIndex) - .setWaitForYellowStatus() - .setTimeout(TimeValue.timeValueSeconds(10)) - .get() - .isTimedOut() - ); - indexDocs(client(clusterAlias), remoteIndex); + .execute(refCountingListener.acquire(r -> { + assertAcked(r); + client(clusterAlias).admin() + .cluster() + .prepareHealth(TEST_REQUEST_TIMEOUT, remoteIndex) + .setWaitForYellowStatus() + .setTimeout(TimeValue.timeValueSeconds(10)) + .execute(refCountingListener.acquire(healthResponse -> { + assertFalse(healthResponse.isTimedOut()); + indexDocs(client(clusterAlias), remoteIndex, refCountingListener.acquire()); + })); + })); + }); } - + future.get(); Map<String, Object> clusterInfo = new HashMap<>(); clusterInfo.put("local.index", localIndex); clusterInfo.put("remote.index", remoteIndex); return clusterInfo; } - private int indexDocs(Client client, String index) { + private void indexDocs(Client client, String index, ActionListener<Void> listener) { int numDocs = between(5, 20); + final BulkRequestBuilder bulkRequest = client.prepareBulk(); for (int i = 0; i < numDocs; i++) { - client.prepareIndex(index).setSource("f", "v", "@timestamp", randomNonNegativeLong()).get(); + bulkRequest.add(client.prepareIndex(index).setSource("f", "v", "@timestamp", randomNonNegativeLong())); } - client.admin().indices().prepareRefresh(index).get(); - return numDocs; + bulkRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).execute(listener.safeMap(r -> null)); } /** diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterIT.java index cb4d0681cdb23..57a9f8131ac2d 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterIT.java @@ -86,7 +86,7 @@ public class CrossClusterIT extends AbstractMultiClustersTestCase { @Override - protected Collection<String> remoteClusterAlias() { + protected List<String> remoteClusterAlias() { return List.of("cluster_a"); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java index 63eece88a53fc..823d3198bc7a2 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java @@ -60,7 +60,7 @@ public class CrossClusterSearchIT extends AbstractMultiClustersTestCase { private static long LATEST_TIMESTAMP = 1691348820000L; @Override - protected Collection<String> remoteClusterAlias() { + protected List<String> remoteClusterAlias() { return List.of(REMOTE_CLUSTER); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchLeakIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchLeakIT.java index 8b493782d55b5..e8a3df353a01e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchLeakIT.java +++ 
b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchLeakIT.java @@ -38,7 +38,7 @@ public class CrossClusterSearchLeakIT extends AbstractMultiClustersTestCase { @Override - protected Collection<String> remoteClusterAlias() { + protected List<String> remoteClusterAlias() { return List.of("cluster_a"); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java index 36580ebda8aee..fc105d3d4fcd2 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java @@ -3703,8 +3703,9 @@ public List<PreConfiguredTokenFilter> getPreConfiguredTokenFilters() { @Override public Map<String, AnalysisProvider<AnalyzerProvider<? extends Analyzer>>> getAnalyzers() { - return singletonMap("mock_whitespace", (indexSettings, environment, name, settings) -> { - return new AbstractIndexAnalyzerProvider<Analyzer>(name, settings) { + return singletonMap( + "mock_whitespace", + (indexSettings, environment, name, settings) -> new AbstractIndexAnalyzerProvider<Analyzer>(name) { MockAnalyzer instance = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false); @@ -3712,8 +3713,8 @@ public Map remoteClusterAlias() { + protected List<String> remoteClusterAlias() { return List.of("remote_cluster"); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/nested/SimpleNestedIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/nested/SimpleNestedIT.java index 8225386ed02d2..acfc55a740f1e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/nested/SimpleNestedIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/nested/SimpleNestedIT.java @@ -53,6 +53,7 @@ import static org.hamcrest.Matchers.startsWith; public class SimpleNestedIT extends ESIntegTestCase { + public void testSimpleNested() throws Exception { assertAcked(prepareCreate("test").setMapping("nested1", "type=nested")); ensureGreen(); @@ -87,21 +88,20 @@ public void testSimpleNested() throws Exception { // check the numDocs assertDocumentCount("test", 3); - assertHitCount(prepareSearch("test").setQuery(termQuery("n_field1", "n_value1_1")), 0L); - - // search for something that matches the nested doc, and see that we don't find the nested doc - assertHitCount(prepareSearch("test"), 1L); - assertHitCount(prepareSearch("test").setQuery(termQuery("n_field1", "n_value1_1")), 0L); + assertHitCount( + 0L, + prepareSearch("test").setQuery(termQuery("n_field1", "n_value1_1")), + prepareSearch("test").setQuery(termQuery("n_field1", "n_value1_1")) + ); - // now, do a nested query - assertHitCountAndNoFailures( + assertHitCount( + 1L, + // search for something that matches the nested doc, and see that we don't find the nested doc + prepareSearch("test"), + // now, do a nested query prepareSearch("test").setQuery(nestedQuery("nested1", termQuery("nested1.n_field1", "n_value1_1"), ScoreMode.Avg)), - 1L - ); - assertHitCountAndNoFailures( prepareSearch("test").setQuery(nestedQuery("nested1", termQuery("nested1.n_field1", "n_value1_1"), ScoreMode.Avg)) - .setSearchType(SearchType.DFS_QUERY_THEN_FETCH), - 1L + .setSearchType(SearchType.DFS_QUERY_THEN_FETCH) ); // add another doc, one that would match if it was not nested... 
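The recurring test refactor in these hunks replaces several identical assertHitCount(request, count) calls with a single call that takes the expected count first and any number of request builders. A minimal sketch of what such a varargs helper amounts to (a hypothetical simplification; the real ElasticsearchAssertions overload also takes care of releasing each response):

import org.elasticsearch.action.search.SearchRequestBuilder;

import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;

final class HitCountAssertions {
    // Assert one expected total hit count across several prepared searches,
    // delegating to the existing single-request assertion for each builder.
    static void assertHitCountForAll(long expectedHitCount, SearchRequestBuilder... requests) {
        for (SearchRequestBuilder request : requests) {
            assertHitCount(request, expectedHitCount);
        }
    }
}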
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/nested/VectorNestedIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/nested/VectorNestedIT.java index d1021715ceffc..aaab14941d4bb 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/nested/VectorNestedIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/nested/VectorNestedIT.java @@ -69,7 +69,7 @@ public void testSimpleNested() throws Exception { assertResponse( prepareSearch("test").setKnnSearch( - List.of(new KnnSearchBuilder("nested.vector", new float[] { 1, 1, 1 }, 1, 1, null).innerHit(new InnerHitBuilder())) + List.of(new KnnSearchBuilder("nested.vector", new float[] { 1, 1, 1 }, 1, 1, null, null).innerHit(new InnerHitBuilder())) ).setAllowPartialSearchResults(false), response -> assertThat(response.getHits().getHits().length, greaterThan(0)) ); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/profile/dfs/DfsProfilerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/profile/dfs/DfsProfilerIT.java index 876edc282c903..95d69a6ebaa86 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/profile/dfs/DfsProfilerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/profile/dfs/DfsProfilerIT.java @@ -19,6 +19,7 @@ import org.elasticsearch.search.profile.query.CollectorResult; import org.elasticsearch.search.profile.query.QueryProfileShardResult; import org.elasticsearch.search.vectors.KnnSearchBuilder; +import org.elasticsearch.search.vectors.RescoreVectorBuilder; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.xcontent.XContentFactory; @@ -71,6 +72,7 @@ public void testProfileDfs() throws Exception { new float[] { randomFloat(), randomFloat(), randomFloat() }, randomIntBetween(5, 10), 50, + randomBoolean() ? null : new RescoreVectorBuilder(randomFloatBetween(1.0f, 10.0f, false)), randomBoolean() ? 
null : randomFloat() ); if (randomBoolean()) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/retriever/MinimalCompoundRetrieverIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/retriever/MinimalCompoundRetrieverIT.java index 97aa428822fae..8dc37bad675e8 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/retriever/MinimalCompoundRetrieverIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/retriever/MinimalCompoundRetrieverIT.java @@ -26,7 +26,6 @@ import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; -import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.List; @@ -43,7 +42,7 @@ public class MinimalCompoundRetrieverIT extends AbstractMultiClustersTestCase { private static final String REMOTE_CLUSTER = "cluster_a"; @Override - protected Collection<String> remoteClusterAlias() { + protected List<String> remoteClusterAlias() { return List.of(REMOTE_CLUSTER); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/retriever/RetrieverTelemetryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/retriever/RetrieverTelemetryIT.java index 537ace30e88f0..40849bea5512e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/retriever/RetrieverTelemetryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/retriever/RetrieverTelemetryIT.java @@ -84,7 +84,9 @@ public void testTelemetryForRetrievers() throws IOException { // search#1 - this will record 1 entry for "retriever" in `sections`, and 1 for "knn" under `retrievers` { - performSearch(new SearchSourceBuilder().retriever(new KnnRetrieverBuilder("vector", new float[] { 1.0f }, null, 10, 15, null))); + performSearch( + new SearchSourceBuilder().retriever(new KnnRetrieverBuilder("vector", new float[] { 1.0f }, null, 10, 15, null, null)) + ); } // search#2 - this will record 1 entry for "retriever" in `sections`, 1 for "standard" under `retrievers`, and 1 for "range" under @@ -98,7 +100,7 @@ { performSearch( new SearchSourceBuilder().retriever( - new StandardRetrieverBuilder(new KnnVectorQueryBuilder("vector", new float[] { 1.0f }, 10, 15, null)) + new StandardRetrieverBuilder(new KnnVectorQueryBuilder("vector", new float[] { 1.0f }, 10, 15, null, null)) ) ); } @@ -112,7 +114,9 @@ // search#5 - t // his will record 1 entry for "knn" in `sections` { - performSearch(new SearchSourceBuilder().knnSearch(List.of(new KnnSearchBuilder("vector", new float[] { 1.0f }, 10, 15, null)))); + performSearch( + new SearchSourceBuilder().knnSearch(List.of(new KnnSearchBuilder("vector", new float[] { 1.0f }, 10, 15, null, null))) + ); } // search#6 - this will record 1 entry for "query" in `sections`, and 1 for "match_all" under `queries` diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/scroll/SearchScrollIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/scroll/SearchScrollIT.java index 7ac24b77a4b6d..a54e19b839ad3 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/scroll/SearchScrollIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/scroll/SearchScrollIT.java @@ -206,11 +206,17 @@ public void testScrollAndUpdateIndex() throws Exception { indicesAdmin().prepareRefresh().get(); - 
assertHitCount(prepareSearch().setSize(0).setQuery(matchAllQuery()), 500); - assertHitCount(prepareSearch().setSize(0).setQuery(termQuery("message", "test")), 500); - assertHitCount(prepareSearch().setSize(0).setQuery(termQuery("message", "test")), 500); - assertHitCount(prepareSearch().setSize(0).setQuery(termQuery("message", "update")), 0); - assertHitCount(prepareSearch().setSize(0).setQuery(termQuery("message", "update")), 0); + assertHitCount( + 500, + prepareSearch().setSize(0).setQuery(matchAllQuery()), + prepareSearch().setSize(0).setQuery(termQuery("message", "test")), + prepareSearch().setSize(0).setQuery(termQuery("message", "test")) + ); + assertHitCount( + 0, + prepareSearch().setSize(0).setQuery(termQuery("message", "update")), + prepareSearch().setSize(0).setQuery(termQuery("message", "update")) + ); SearchResponse searchResponse = prepareSearch().setQuery(queryStringQuery("user:kimchy")) .setSize(35) @@ -229,11 +235,17 @@ public void testScrollAndUpdateIndex() throws Exception { } while (searchResponse.getHits().getHits().length > 0); indicesAdmin().prepareRefresh().get(); - assertHitCount(prepareSearch().setSize(0).setQuery(matchAllQuery()), 500); - assertHitCount(prepareSearch().setSize(0).setQuery(termQuery("message", "test")), 0); - assertHitCount(prepareSearch().setSize(0).setQuery(termQuery("message", "test")), 0); - assertHitCount(prepareSearch().setSize(0).setQuery(termQuery("message", "update")), 500); - assertHitCount(prepareSearch().setSize(0).setQuery(termQuery("message", "update")), 500); + assertHitCount( + 500, + prepareSearch().setSize(0).setQuery(matchAllQuery()), + prepareSearch().setSize(0).setQuery(termQuery("message", "update")), + prepareSearch().setSize(0).setQuery(termQuery("message", "update")) + ); + assertHitCount( + 0, + prepareSearch().setSize(0).setQuery(termQuery("message", "test")), + prepareSearch().setSize(0).setQuery(termQuery("message", "test")) + ); } finally { clearScroll(searchResponse.getScrollId()); searchResponse.decRef(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/simple/SimpleSearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/simple/SimpleSearchIT.java index e87c4790aa665..5a9be73d92268 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/simple/SimpleSearchIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/simple/SimpleSearchIT.java @@ -147,16 +147,22 @@ public void testIpCidr() throws Exception { prepareIndex("test").setId("5").setSource("ip", "2001:db8::ff00:42:8329").get(); refresh(); - assertHitCount(prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "192.168.0.1"))), 1L); - assertHitCount(prepareSearch().setQuery(queryStringQuery("ip: 192.168.0.1")), 1L); - assertHitCount(prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "192.168.0.1/32"))), 1L); + assertHitCount(prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "192.168.1.5/32"))), 0L); + assertHitCount( + 1L, + prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "192.168.0.1"))), + prepareSearch().setQuery(queryStringQuery("ip: 192.168.0.1")), + prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "192.168.0.1/32"))), + prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "2001:db8::ff00:42:8329/128"))), + prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "2001:db8::/64"))) + ); 
assertHitCount(prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "192.168.0.0/24"))), 3L); - assertHitCount(prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "192.0.0.0/8"))), 4L); - assertHitCount(prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "0.0.0.0/0"))), 4L); - assertHitCount(prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "2001:db8::ff00:42:8329/128"))), 1L); - assertHitCount(prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "2001:db8::/64"))), 1L); + assertHitCount( + 4L, + prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "192.0.0.0/8"))), + prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "0.0.0.0/0"))) + ); assertHitCount(prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "::/0"))), 5L); - assertHitCount(prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "192.168.1.5/32"))), 0L); assertFailures( prepareSearch().setQuery(boolQuery().must(QueryBuilders.termQuery("ip", "0/0/0/0/0"))), @@ -170,8 +176,11 @@ public void testSimpleId() { prepareIndex("test").setId("XXX1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); // id is not indexed, but lets see that we automatically convert to - assertHitCount(prepareSearch().setQuery(QueryBuilders.termQuery("_id", "XXX1")), 1L); - assertHitCount(prepareSearch().setQuery(QueryBuilders.queryStringQuery("_id:XXX1")), 1L); + assertHitCount( + 1L, + prepareSearch().setQuery(QueryBuilders.termQuery("_id", "XXX1")), + prepareSearch().setQuery(QueryBuilders.queryStringQuery("_id:XXX1")) + ); } public void testSimpleDateRange() throws Exception { @@ -324,12 +333,12 @@ public void testLargeFromAndSizeSucceeds() throws Exception { createIndex("idx"); indexRandom(true, prepareIndex("idx").setSource("{}", XContentType.JSON)); - assertHitCount(prepareSearch("idx").setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) - 10), 1); - assertHitCount(prepareSearch("idx").setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)), 1); assertHitCount( + 1, + prepareSearch("idx").setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) - 10), + prepareSearch("idx").setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)), prepareSearch("idx").setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) / 2) - .setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) / 2 - 1), - 1 + .setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) / 2 - 1) ); } @@ -340,12 +349,12 @@ public void testTooLargeFromAndSizeOkBySetting() throws Exception { ).get(); indexRandom(true, prepareIndex("idx").setSource("{}", XContentType.JSON)); - assertHitCount(prepareSearch("idx").setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)), 1); - assertHitCount(prepareSearch("idx").setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) + 1), 1); assertHitCount( + 1, + prepareSearch("idx").setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)), + prepareSearch("idx").setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) + 1), prepareSearch("idx").setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)) - .setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)), - 1 + .setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)) ); } @@ -358,12 +367,12 @@ public void testTooLargeFromAndSizeOkByDynamicSetting() throws 
Exception { ); indexRandom(true, prepareIndex("idx").setSource("{}", XContentType.JSON)); - assertHitCount(prepareSearch("idx").setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)), 1); - assertHitCount(prepareSearch("idx").setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) + 1), 1); assertHitCount( + 1, + prepareSearch("idx").setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)), + prepareSearch("idx").setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) + 1), prepareSearch("idx").setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)) - .setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)), - 1 + .setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)) ); } @@ -371,12 +380,12 @@ public void testTooLargeFromAndSizeBackwardsCompatibilityRecommendation() throws prepareCreate("idx").setSettings(Settings.builder().put(IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey(), Integer.MAX_VALUE)).get(); indexRandom(true, prepareIndex("idx").setSource("{}", XContentType.JSON)); - assertHitCount(prepareSearch("idx").setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) * 10), 1); - assertHitCount(prepareSearch("idx").setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) * 10), 1); assertHitCount( + 1, + prepareSearch("idx").setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) * 10), + prepareSearch("idx").setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) * 10), prepareSearch("idx").setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) * 10) - .setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) * 10), - 1 + .setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) * 10) ); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/sort/FieldSortIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/sort/FieldSortIT.java index 87665c3d784f1..bf7a315040caa 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/sort/FieldSortIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/sort/FieldSortIT.java @@ -202,7 +202,6 @@ public void testIssue6614() throws InterruptedException { response -> { for (int j = 0; j < response.getHits().getHits().length; j++) { assertThat( - response.toString() + "\n vs. 
\n" + allDocsResponse.toString(), response.getHits().getHits()[j].getId(), equalTo(allDocsResponse.getHits().getHits()[j].getId()) ); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RestoreSnapshotIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RestoreSnapshotIT.java index fe83073eeb780..4ba06a34ca3a7 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RestoreSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RestoreSnapshotIT.java @@ -678,9 +678,12 @@ public void testChangeSettingsOnRestore() throws Exception { indexRandom(true, builders); flushAndRefresh(); - assertHitCount(client.prepareSearch("test-idx").setSize(0).setQuery(matchQuery("field1", "foo")), numdocs); + assertHitCount( + numdocs, + client.prepareSearch("test-idx").setSize(0).setQuery(matchQuery("field1", "foo")), + client.prepareSearch("test-idx").setSize(0).setQuery(matchQuery("field1", "bar")) + ); assertHitCount(client.prepareSearch("test-idx").setSize(0).setQuery(matchQuery("field1", "Foo")), 0); - assertHitCount(client.prepareSearch("test-idx").setSize(0).setQuery(matchQuery("field1", "bar")), numdocs); createSnapshot("test-repo", "test-snap", Collections.singletonList("test-idx")); @@ -736,8 +739,11 @@ public void testChangeSettingsOnRestore() throws Exception { assertThat(getSettingsResponse.getSetting("test-idx", SETTING_NUMBER_OF_SHARDS), equalTo("" + numberOfShards)); assertThat(getSettingsResponse.getSetting("test-idx", "index.analysis.analyzer.my_analyzer.type"), equalTo("standard")); - assertHitCount(client.prepareSearch("test-idx").setSize(0).setQuery(matchQuery("field1", "Foo")), numdocs); - assertHitCount(client.prepareSearch("test-idx").setSize(0).setQuery(matchQuery("field1", "bar")), numdocs); + assertHitCount( + numdocs, + client.prepareSearch("test-idx").setSize(0).setQuery(matchQuery("field1", "Foo")), + client.prepareSearch("test-idx").setSize(0).setQuery(matchQuery("field1", "bar")) + ); logger.info("--> delete the index and recreate it while deleting all index settings"); cluster().wipeIndices("test-idx"); @@ -758,8 +764,11 @@ public void testChangeSettingsOnRestore() throws Exception { // Make sure that number of shards didn't change assertThat(getSettingsResponse.getSetting("test-idx", SETTING_NUMBER_OF_SHARDS), equalTo("" + numberOfShards)); - assertHitCount(client.prepareSearch("test-idx").setSize(0).setQuery(matchQuery("field1", "Foo")), numdocs); - assertHitCount(client.prepareSearch("test-idx").setSize(0).setQuery(matchQuery("field1", "bar")), numdocs); + assertHitCount( + numdocs, + client.prepareSearch("test-idx").setSize(0).setQuery(matchQuery("field1", "Foo")), + client.prepareSearch("test-idx").setSize(0).setQuery(matchQuery("field1", "bar")) + ); } public void testRestoreChangeIndexMode() { @@ -800,6 +809,24 @@ public void testRestoreChangeSyntheticSource() { assertThat(error.getMessage(), containsString("cannot modify setting [index.mapping.source.mode] on restore")); } + public void testRestoreChangeRecoveryUseSyntheticSource() { + Client client = client(); + createRepository("test-repo", "fs"); + String indexName = "test-idx"; + assertAcked(client.admin().indices().prepareCreate(indexName).setSettings(Settings.builder().put(indexSettings()))); + createSnapshot("test-repo", "test-snap", Collections.singletonList(indexName)); + cluster().wipeIndices(indexName); + var error = expectThrows(SnapshotRestoreException.class, () -> { + client.admin() + .cluster() + 
.prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") + .setIndexSettings(Settings.builder().put("index.recovery.use_synthetic_source", true)) + .setWaitForCompletion(true) + .get(); + }); + assertThat(error.getMessage(), containsString("cannot modify setting [index.recovery.use_synthetic_source] on restore")); + } + public void testRestoreChangeIndexSorts() { Client client = client(); createRepository("test-repo", "fs"); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotShutdownIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotShutdownIT.java index e5e641bfdda21..755ee960be73e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotShutdownIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotShutdownIT.java @@ -524,6 +524,15 @@ public void testSnapshotShutdownProgressTracker() throws Exception { "Pause signals have been set for all shard snapshots on data node [" + nodeForRemovalId + "]" ) ); + mockLog.addExpectation( + new MockLog.SeenEventExpectation( + "SnapshotShutdownProgressTracker index shard snapshot status messages", + SnapshotShutdownProgressTracker.class.getCanonicalName(), + Level.INFO, + // Expect the shard snapshot to stall in data file upload, since we've blocked the data node file upload to the blob store. + "statusDescription='enqueued file snapshot tasks: threads running concurrent file uploads'" + ) + ); putShutdownForRemovalMetadata(nodeForRemoval, clusterService); @@ -583,6 +592,14 @@ public void testSnapshotShutdownProgressTracker() throws Exception { "Current active shard snapshot stats on data node [" + nodeForRemovalId + "]*Paused [" + numShards + "]" ) ); + mockLog.addExpectation( + new MockLog.SeenEventExpectation( + "SnapshotShutdownProgressTracker index shard snapshot messages", + SnapshotShutdownProgressTracker.class.getCanonicalName(), + Level.INFO, + "statusDescription='finished: master notification attempt complete'" + ) + ); // Release the master node to respond snapshotStatusUpdateLatch.countDown(); diff --git a/server/src/main/java/module-info.java b/server/src/main/java/module-info.java index 5acc202ebb294..ff902dbede007 100644 --- a/server/src/main/java/module-info.java +++ b/server/src/main/java/module-info.java @@ -147,6 +147,7 @@ exports org.elasticsearch.action.support.master; exports org.elasticsearch.action.support.master.info; exports org.elasticsearch.action.support.nodes; + exports org.elasticsearch.action.support.local; exports org.elasticsearch.action.support.replication; exports org.elasticsearch.action.support.single.instance; exports org.elasticsearch.action.support.single.shard; @@ -458,7 +459,9 @@ org.elasticsearch.index.codec.vectors.ES815HnswBitVectorsFormat, org.elasticsearch.index.codec.vectors.ES815BitFlatVectorFormat, org.elasticsearch.index.codec.vectors.es816.ES816BinaryQuantizedVectorsFormat, - org.elasticsearch.index.codec.vectors.es816.ES816HnswBinaryQuantizedVectorsFormat; + org.elasticsearch.index.codec.vectors.es816.ES816HnswBinaryQuantizedVectorsFormat, + org.elasticsearch.index.codec.vectors.es818.ES818BinaryQuantizedVectorsFormat, + org.elasticsearch.index.codec.vectors.es818.ES818HnswBinaryQuantizedVectorsFormat; provides org.apache.lucene.codecs.Codec with diff --git a/server/src/main/java/org/elasticsearch/ElasticsearchException.java b/server/src/main/java/org/elasticsearch/ElasticsearchException.java index 3c5c365654206..11736bfe07deb 100644 --- 
a/server/src/main/java/org/elasticsearch/ElasticsearchException.java +++ b/server/src/main/java/org/elasticsearch/ElasticsearchException.java @@ -1781,7 +1781,7 @@ private enum ElasticsearchExceptionHandle { org.elasticsearch.cluster.coordination.CoordinationStateRejectedException.class, org.elasticsearch.cluster.coordination.CoordinationStateRejectedException::new, 150, - TransportVersions.V_7_0_0 + UNKNOWN_VERSION_ADDED ), SNAPSHOT_IN_PROGRESS_EXCEPTION( org.elasticsearch.snapshots.SnapshotInProgressException.class, @@ -1947,13 +1947,13 @@ private enum ElasticsearchExceptionHandle { org.elasticsearch.ingest.IngestPipelineException.class, org.elasticsearch.ingest.IngestPipelineException::new, 182, - TransportVersions.INGEST_PIPELINE_EXCEPTION_ADDED + TransportVersions.V_8_16_0 ), INDEX_RESPONSE_WRAPPER_EXCEPTION( IndexDocFailureStoreStatus.ExceptionWithFailureStoreStatus.class, IndexDocFailureStoreStatus.ExceptionWithFailureStoreStatus::new, 183, - TransportVersions.FAILURE_STORE_STATUS_IN_INDEX_RESPONSE + TransportVersions.V_8_16_0 ); final Class exceptionClass; diff --git a/server/src/main/java/org/elasticsearch/TransportVersion.java b/server/src/main/java/org/elasticsearch/TransportVersion.java index 37f6e6434735c..64d1c0535a561 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersion.java +++ b/server/src/main/java/org/elasticsearch/TransportVersion.java @@ -16,7 +16,14 @@ import org.elasticsearch.plugins.ExtensionLoader; import java.io.IOException; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Map; import java.util.ServiceLoader; +import java.util.function.Function; +import java.util.stream.Collectors; +import java.util.stream.Stream; /** * Represents the version of the wire protocol used to communicate between a pair of ES nodes. @@ -56,8 +63,14 @@ public static TransportVersion readVersion(StreamInput in) throws IOException { return fromId(in.readVInt()); } + /** + * Finds a {@code TransportVersion} by its id. + * If a transport version with the specified ID does not exist, + * this method creates and returns a new instance of {@code TransportVersion} with the specified ID. + * The new instance is not registered in {@code TransportVersion.getAllVersions}. + */ public static TransportVersion fromId(int id) { - TransportVersion known = TransportVersions.VERSION_IDS.get(id); + TransportVersion known = VersionsHolder.ALL_VERSIONS_MAP.get(id); if (known != null) { return known; } @@ -95,7 +108,14 @@ public static boolean isCompatible(TransportVersion version) { * This should be the transport version with the highest id. 
*/ public static TransportVersion current() { - return CurrentHolder.CURRENT; + return VersionsHolder.CURRENT; + } + + /** + * Sorted list of all defined transport versions + */ + public static List<TransportVersion> getAllVersions() { + return VersionsHolder.ALL_VERSIONS; } public static TransportVersion fromString(String str) { @@ -139,16 +159,25 @@ public String toString() { return Integer.toString(id); } - private static class CurrentHolder { - private static final TransportVersion CURRENT = findCurrent(); + private static class VersionsHolder { + private static final List<TransportVersion> ALL_VERSIONS; + private static final Map<Integer, TransportVersion> ALL_VERSIONS_MAP; + private static final TransportVersion CURRENT; + + static { + Collection<TransportVersion> extendedVersions = ExtensionLoader.loadSingleton(ServiceLoader.load(VersionExtension.class)) + .map(VersionExtension::getTransportVersions) + .orElse(Collections.emptyList()); + + if (extendedVersions.isEmpty()) { + ALL_VERSIONS = TransportVersions.DEFINED_VERSIONS; + } else { + ALL_VERSIONS = Stream.concat(TransportVersions.DEFINED_VERSIONS.stream(), extendedVersions.stream()).sorted().toList(); + } + + ALL_VERSIONS_MAP = ALL_VERSIONS.stream().collect(Collectors.toUnmodifiableMap(TransportVersion::id, Function.identity())); - // finds the pluggable current version - private static TransportVersion findCurrent() { - var version = ExtensionLoader.loadSingleton(ServiceLoader.load(VersionExtension.class)) - .map(e -> e.getCurrentTransportVersion(TransportVersions.LATEST_DEFINED)) - .orElse(TransportVersions.LATEST_DEFINED); - assert version.onOrAfter(TransportVersions.LATEST_DEFINED); - return version; + CURRENT = ALL_VERSIONS.getLast(); } } } diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 2e4842912dfae..d61afbdf98587 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -13,13 +13,12 @@ import org.elasticsearch.core.UpdateForV9; import java.lang.reflect.Field; -import java.util.Collection; +import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; -import java.util.NavigableMap; import java.util.Set; -import java.util.TreeMap; import java.util.TreeSet; import java.util.function.IntFunction; @@ -53,12 +52,7 @@ static TransportVersion def(int id) { @UpdateForV9(owner = UpdateForV9.Owner.CORE_INFRA) // remove the transport versions with which v9 will not need to interact public static final TransportVersion ZERO = def(0); public static final TransportVersion V_7_0_0 = def(7_00_00_99); - public static final TransportVersion V_7_0_1 = def(7_00_01_99); - public static final TransportVersion V_7_1_0 = def(7_01_00_99); - public static final TransportVersion V_7_2_0 = def(7_02_00_99); - public static final TransportVersion V_7_2_1 = def(7_02_01_99); public static final TransportVersion V_7_3_0 = def(7_03_00_99); - public static final TransportVersion V_7_3_2 = def(7_03_02_99); public static final TransportVersion V_7_4_0 = def(7_04_00_99); public static final TransportVersion V_7_5_0 = def(7_05_00_99); public static final TransportVersion V_7_6_0 = def(7_06_00_99); @@ -104,78 +98,7 @@ static TransportVersion def(int id) { public static final TransportVersion V_8_14_0 = def(8_636_00_1); public static final TransportVersion V_8_15_0 = def(8_702_00_2); public static final TransportVersion V_8_15_2 = def(8_702_00_3); - public static final TransportVersion
QUERY_RULES_LIST_INCLUDES_TYPES_BACKPORT_8_15 = def(8_702_00_4); - public static final TransportVersion ML_INFERENCE_DONT_DELETE_WHEN_SEMANTIC_TEXT_EXISTS = def(8_703_00_0); - public static final TransportVersion INFERENCE_ADAPTIVE_ALLOCATIONS = def(8_704_00_0); - public static final TransportVersion INDEX_REQUEST_UPDATE_BY_SCRIPT_ORIGIN = def(8_705_00_0); - public static final TransportVersion ML_INFERENCE_COHERE_UNUSED_RERANK_SETTINGS_REMOVED = def(8_706_00_0); - public static final TransportVersion ENRICH_CACHE_STATS_SIZE_ADDED = def(8_707_00_0); - public static final TransportVersion ENTERPRISE_GEOIP_DOWNLOADER = def(8_708_00_0); - public static final TransportVersion NODES_STATS_ENUM_SET = def(8_709_00_0); - public static final TransportVersion MASTER_NODE_METRICS = def(8_710_00_0); - public static final TransportVersion SEGMENT_LEVEL_FIELDS_STATS = def(8_711_00_0); - public static final TransportVersion ML_ADD_DETECTION_RULE_PARAMS = def(8_712_00_0); - public static final TransportVersion FIX_VECTOR_SIMILARITY_INNER_HITS = def(8_713_00_0); - public static final TransportVersion INDEX_REQUEST_UPDATE_BY_DOC_ORIGIN = def(8_714_00_0); - public static final TransportVersion ESQL_ATTRIBUTE_CACHED_SERIALIZATION = def(8_715_00_0); - public static final TransportVersion REGISTER_SLM_STATS = def(8_716_00_0); - public static final TransportVersion ESQL_NESTED_UNSUPPORTED = def(8_717_00_0); - public static final TransportVersion ESQL_SINGLE_VALUE_QUERY_SOURCE = def(8_718_00_0); - public static final TransportVersion ESQL_ORIGINAL_INDICES = def(8_719_00_0); - public static final TransportVersion ML_INFERENCE_EIS_INTEGRATION_ADDED = def(8_720_00_0); - public static final TransportVersion INGEST_PIPELINE_EXCEPTION_ADDED = def(8_721_00_0); - public static final TransportVersion ZDT_NANOS_SUPPORT_BROKEN = def(8_722_00_0); - public static final TransportVersion REMOVE_GLOBAL_RETENTION_FROM_TEMPLATES = def(8_723_00_0); - public static final TransportVersion RANDOM_RERANKER_RETRIEVER = def(8_724_00_0); - public static final TransportVersion ESQL_PROFILE_SLEEPS = def(8_725_00_0); - public static final TransportVersion ZDT_NANOS_SUPPORT = def(8_726_00_0); - public static final TransportVersion LTR_SERVERLESS_RELEASE = def(8_727_00_0); - public static final TransportVersion ALLOW_PARTIAL_SEARCH_RESULTS_IN_PIT = def(8_728_00_0); - public static final TransportVersion RANK_DOCS_RETRIEVER = def(8_729_00_0); - public static final TransportVersion ESQL_ES_FIELD_CACHED_SERIALIZATION = def(8_730_00_0); - public static final TransportVersion ADD_MANAGE_ROLES_PRIVILEGE = def(8_731_00_0); - public static final TransportVersion REPOSITORIES_TELEMETRY = def(8_732_00_0); - public static final TransportVersion ML_INFERENCE_ALIBABACLOUD_SEARCH_ADDED = def(8_733_00_0); - public static final TransportVersion FIELD_CAPS_RESPONSE_INDEX_MODE = def(8_734_00_0); - public static final TransportVersion GET_DATA_STREAMS_VERBOSE = def(8_735_00_0); - public static final TransportVersion ESQL_ADD_INDEX_MODE_CONCRETE_INDICES = def(8_736_00_0); - public static final TransportVersion UNASSIGNED_PRIMARY_COUNT_ON_CLUSTER_HEALTH = def(8_737_00_0); - public static final TransportVersion ESQL_AGGREGATE_EXEC_TRACKS_INTERMEDIATE_ATTRS = def(8_738_00_0); - public static final TransportVersion CCS_TELEMETRY_STATS = def(8_739_00_0); - public static final TransportVersion GLOBAL_RETENTION_TELEMETRY = def(8_740_00_0); - public static final TransportVersion ROUTING_TABLE_VERSION_REMOVED = def(8_741_00_0); - public static final TransportVersion 
ML_SCHEDULED_EVENT_TIME_SHIFT_CONFIGURATION = def(8_742_00_0); - public static final TransportVersion SIMULATE_COMPONENT_TEMPLATES_SUBSTITUTIONS = def(8_743_00_0); - public static final TransportVersion ML_INFERENCE_IBM_WATSONX_EMBEDDINGS_ADDED = def(8_744_00_0); - public static final TransportVersion BULK_INCREMENTAL_STATE = def(8_745_00_0); - public static final TransportVersion FAILURE_STORE_STATUS_IN_INDEX_RESPONSE = def(8_746_00_0); - public static final TransportVersion ESQL_AGGREGATION_OPERATOR_STATUS_FINISH_NANOS = def(8_747_00_0); - public static final TransportVersion ML_TELEMETRY_MEMORY_ADDED = def(8_748_00_0); - public static final TransportVersion ILM_ADD_SEARCHABLE_SNAPSHOT_TOTAL_SHARDS_PER_NODE = def(8_749_00_0); - public static final TransportVersion SEMANTIC_TEXT_SEARCH_INFERENCE_ID = def(8_750_00_0); - public static final TransportVersion ML_INFERENCE_CHUNKING_SETTINGS = def(8_751_00_0); - public static final TransportVersion SEMANTIC_QUERY_INNER_HITS = def(8_752_00_0); - public static final TransportVersion RETAIN_ILM_STEP_INFO = def(8_753_00_0); - public static final TransportVersion ADD_DATA_STREAM_OPTIONS = def(8_754_00_0); - public static final TransportVersion CCS_REMOTE_TELEMETRY_STATS = def(8_755_00_0); - public static final TransportVersion ESQL_CCS_EXECUTION_INFO = def(8_756_00_0); - public static final TransportVersion REGEX_AND_RANGE_INTERVAL_QUERIES = def(8_757_00_0); - public static final TransportVersion RRF_QUERY_REWRITE = def(8_758_00_0); - public static final TransportVersion SEARCH_FAILURE_STATS = def(8_759_00_0); - public static final TransportVersion INGEST_GEO_DATABASE_PROVIDERS = def(8_760_00_0); - public static final TransportVersion DATE_TIME_DOC_VALUES_LOCALES = def(8_761_00_0); - public static final TransportVersion FAST_REFRESH_RCO = def(8_762_00_0); - public static final TransportVersion TEXT_SIMILARITY_RERANKER_QUERY_REWRITE = def(8_763_00_0); - public static final TransportVersion SIMULATE_INDEX_TEMPLATES_SUBSTITUTIONS = def(8_764_00_0); - public static final TransportVersion RETRIEVERS_TELEMETRY_ADDED = def(8_765_00_0); - public static final TransportVersion ESQL_CACHED_STRING_SERIALIZATION = def(8_766_00_0); - public static final TransportVersion CHUNK_SENTENCE_OVERLAP_SETTING_ADDED = def(8_767_00_0); - public static final TransportVersion OPT_IN_ESQL_CCS_EXECUTION_INFO = def(8_768_00_0); - public static final TransportVersion QUERY_RULE_TEST_API = def(8_769_00_0); - public static final TransportVersion ESQL_PER_AGGREGATE_FILTER = def(8_770_00_0); - public static final TransportVersion ML_INFERENCE_ATTACH_TO_EXISTSING_DEPLOYMENT = def(8_771_00_0); - public static final TransportVersion CONVERT_FAILURE_STORE_OPTIONS_TO_SELECTOR_OPTIONS_INTERNALLY = def(8_772_00_0); - public static final TransportVersion INFERENCE_DONT_PERSIST_ON_READ_BACKPORT_8_16 = def(8_772_00_1); + public static final TransportVersion V_8_16_0 = def(8_772_00_1); public static final TransportVersion ADD_COMPATIBILITY_VERSIONS_TO_NODE_INFO_BACKPORT_8_16 = def(8_772_00_2); public static final TransportVersion SKIP_INNER_HITS_SEARCH_SOURCE_BACKPORT_8_16 = def(8_772_00_3); public static final TransportVersion QUERY_RULES_LIST_INCLUDES_TYPES_BACKPORT_8_16 = def(8_772_00_4); @@ -213,6 +136,8 @@ static TransportVersion def(int id) { public static final TransportVersion SOURCE_MODE_TELEMETRY = def(8_802_00_0); public static final TransportVersion NEW_REFRESH_CLUSTER_BLOCK = def(8_803_00_0); public static final TransportVersion RETRIES_AND_OPERATIONS_IN_BLOBSTORE_STATS = 
def(8_804_00_0); + public static final TransportVersion ADD_DATA_STREAM_OPTIONS_TO_TEMPLATES = def(8_805_00_0); + public static final TransportVersion KNN_QUERY_RESCORE_OVERSAMPLE = def(8_806_00_0); /* * STOP! READ THIS FIRST! No, really, @@ -281,21 +206,24 @@ static TransportVersion def(int id) { */ public static final TransportVersion MINIMUM_CCS_VERSION = V_8_15_0; - static final NavigableMap<Integer, TransportVersion> VERSION_IDS = getAllVersionIds(TransportVersions.class); + /** + * Sorted list of all versions defined in this class + */ + static final List<TransportVersion> DEFINED_VERSIONS = collectAllVersionIdsDefinedInClass(TransportVersions.class); - // the highest transport version constant defined in this file, used as a fallback for TransportVersion.current() + // the highest transport version constant defined static final TransportVersion LATEST_DEFINED; static { - LATEST_DEFINED = VERSION_IDS.lastEntry().getValue(); + LATEST_DEFINED = DEFINED_VERSIONS.getLast(); // see comment on IDS field // now we've registered all the transport versions, we can clear the map IDS = null; } - public static NavigableMap<Integer, TransportVersion> getAllVersionIds(Class<?> cls) { + public static List<TransportVersion> collectAllVersionIdsDefinedInClass(Class<?> cls) { Map<Integer, String> versionIdFields = new HashMap<>(); - NavigableMap<Integer, TransportVersion> builder = new TreeMap<>(); + List<TransportVersion> definedTransportVersions = new ArrayList<>(); Set<String> ignore = Set.of("ZERO", "CURRENT", "MINIMUM_COMPATIBLE", "MINIMUM_CCS_VERSION"); @@ -312,7 +240,7 @@ public static NavigableMap<Integer, TransportVersion> getAllVersionIds(Class<?> cls) { } catch (IllegalAccessException e) { throw new AssertionError(e); } - builder.put(version.id(), version); + definedTransportVersions.add(version); if (Assertions.ENABLED) { // check the version number is unique @@ -329,11 +257,9 @@ public static NavigableMap<Integer, TransportVersion> getAllVersionIds(Class<?> cls) { } } - return Collections.unmodifiableNavigableMap(builder); - } + Collections.sort(definedTransportVersions); - static Collection<TransportVersion> getAllVersions() { - return VERSION_IDS.values(); + return List.copyOf(definedTransportVersions); } static final IntFunction<String> VERSION_LOOKUP = ReleaseVersions.generateVersionsLookup(TransportVersions.class, LATEST_DEFINED.id()); diff --git a/server/src/main/java/org/elasticsearch/action/DocWriteRequest.java b/server/src/main/java/org/elasticsearch/action/DocWriteRequest.java index 0b4c4dbb1fca6..292f962869558 100644 --- a/server/src/main/java/org/elasticsearch/action/DocWriteRequest.java +++ b/server/src/main/java/org/elasticsearch/action/DocWriteRequest.java @@ -160,9 +160,14 @@ public interface DocWriteRequest<T> extends IndicesRequest, Accountable { boolean isRequireDataStream(); /** - * Finalize the request before executing or routing it. + * Finalize the request before routing it. */ - void process(IndexRouting indexRouting); + default void preRoutingProcess(IndexRouting indexRouting) {} + + /** + * Finalize the request after routing it. + */ + default void postRoutingProcess(IndexRouting indexRouting) {} /** * Pick the appropriate shard id to receive this request.
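The TransportVersion and TransportVersions hunks above collapse two structures (a NavigableMap registry plus a pluggable current-version hook) into a single sorted, immutable list whose last element is the current version and whose id-keyed map backs fromId(). A minimal standalone sketch of that merge-and-lookup shape, using a simplified Version record and made-up ids rather than the real Elasticsearch classes:

import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.function.Function;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class VersionsHolderSketch {
    // Stand-in for TransportVersion: ordered by id, so the last element of a sorted list is newest.
    record Version(int id) implements Comparable<Version> {
        @Override
        public int compareTo(Version other) {
            return Integer.compare(id, other.id);
        }
    }

    // Mirrors the VersionsHolder static initializer: statically defined versions are merged with
    // whatever a VersionExtension contributes, then the result is indexed by id.
    static List<Version> allVersions(List<Version> defined, Collection<Version> extended) {
        return extended.isEmpty()
            ? defined
            : Stream.concat(defined.stream(), extended.stream()).sorted().toList();
    }

    public static void main(String[] args) {
        List<Version> defined = List.of(new Version(8_804_00_0), new Version(8_806_00_0));
        List<Version> all = allVersions(defined, List.of(new Version(8_806_00_1)));
        Map<Integer, Version> byId = all.stream()
            .collect(Collectors.toUnmodifiableMap(Version::id, Function.identity()));
        System.out.println(all.getLast());        // the current version: highest id wins
        System.out.println(byId.get(8_804_00_0)); // a known id resolves to its registered instance
        // An unknown id would miss the map; the real fromId() then creates an unregistered instance.
    }
}

List.getLast() here requires Java 21 or newer.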
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetAllocationStatsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetAllocationStatsAction.java index e14f229f17acf..d929fb457d5d1 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetAllocationStatsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetAllocationStatsAction.java @@ -118,7 +118,7 @@ public Request(TimeValue masterNodeTimeout, TaskId parentTaskId, EnumSet<Metric> metrics) { public Request(StreamInput in) throws IOException { super(in); - this.metrics = in.getTransportVersion().onOrAfter(TransportVersions.MASTER_NODE_METRICS) + this.metrics = in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0) ? in.readEnumSet(Metric.class) : EnumSet.of(Metric.ALLOCATIONS, Metric.FS); } @@ -127,7 +127,7 @@ public Request(StreamInput in) throws IOException { public void writeTo(StreamOutput out) throws IOException { assert out.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0); super.writeTo(out); - if (out.getTransportVersion().onOrAfter(TransportVersions.MASTER_NODE_METRICS)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { out.writeEnumSet(metrics); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestParameters.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestParameters.java index d34bc3ec0dc2f..c5e8f37ed3a96 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestParameters.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestParameters.java @@ -117,7 +117,7 @@ public static Metric get(String name) { } public static void writeSetTo(StreamOutput out, EnumSet<Metric> metrics) throws IOException { - if (out.getTransportVersion().onOrAfter(TransportVersions.NODES_STATS_ENUM_SET)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { out.writeEnumSet(metrics); } else { out.writeCollection(metrics, (output, metric) -> output.writeString(metric.metricName)); @@ -125,7 +125,7 @@ public static void writeSetTo(StreamOutput out, EnumSet<Metric> metrics) throws } public static EnumSet<Metric> readSetFrom(StreamInput in) { - if (in.getTransportVersion().onOrAfter(TransportVersions.NODES_STATS_ENUM_SET)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { return in.readEnumSet(Metric.class); } else { return in.readCollection((i) -> EnumSet.noneOf(Metric.class), (is, out) -> { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java index 9ffef1f178f44..b855f2cee7613 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java @@ -17,6 +17,7 @@ import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.ResolvedExpression; import org.elasticsearch.cluster.node.DiscoveryNode; import
org.elasticsearch.cluster.routing.GroupShardsIterator; import org.elasticsearch.cluster.routing.ShardIterator; @@ -84,7 +85,7 @@ protected void masterOperation( String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(clusterState, request); Map<String, Set<String>> routingMap = indexNameExpressionResolver.resolveSearchRouting(state, request.routing(), request.indices()); Map<String, AliasFilter> indicesAndFilters = new HashMap<>(); - Set<String> indicesAndAliases = indexNameExpressionResolver.resolveExpressions(clusterState, request.indices()); + Set<ResolvedExpression> indicesAndAliases = indexNameExpressionResolver.resolveExpressions(clusterState, request.indices()); for (String index : concreteIndices) { final AliasFilter aliasFilter = indicesService.buildAliasFilter(clusterState, index, indicesAndAliases); final String[] aliases = indexNameExpressionResolver.indexAliases( diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java index 9c9467db40de3..b6ced06623306 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java @@ -118,7 +118,7 @@ public CreateSnapshotRequest(StreamInput in) throws IOException { waitForCompletion = in.readBoolean(); partial = in.readBoolean(); userMetadata = in.readGenericMap(); - uuid = in.getTransportVersion().onOrAfter(TransportVersions.REGISTER_SLM_STATS) ? in.readOptionalString() : null; + uuid = in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0) ? in.readOptionalString() : null; } @Override @@ -136,7 +136,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(waitForCompletion); out.writeBoolean(partial); out.writeGenericMap(userMetadata); - if (out.getTransportVersion().onOrAfter(TransportVersions.REGISTER_SLM_STATS)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { out.writeOptionalString(uuid); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java index f99baa855404c..abeb73e5d8c3e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java @@ -44,14 +44,11 @@ public ClusterStatsNodeResponse(StreamInput in) throws IOException { } else { searchUsageStats = new SearchUsageStats(); } - if (in.getTransportVersion().onOrAfter(TransportVersions.REPOSITORIES_TELEMETRY)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { repositoryUsageStats = RepositoryUsageStats.readFrom(in); - } else { - repositoryUsageStats = RepositoryUsageStats.EMPTY; - } - if (in.getTransportVersion().onOrAfter(TransportVersions.CCS_TELEMETRY_STATS)) { ccsMetrics = new CCSTelemetrySnapshot(in); } else { + repositoryUsageStats = RepositoryUsageStats.EMPTY; ccsMetrics = new CCSTelemetrySnapshot(); } } @@ -118,12 +115,10 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_6_0)) { searchUsageStats.writeTo(out); } - if (out.getTransportVersion().onOrAfter(TransportVersions.REPOSITORIES_TELEMETRY)) { + if
(out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { repositoryUsageStats.writeTo(out); - } // else just drop these stats, ok for bwc - if (out.getTransportVersion().onOrAfter(TransportVersions.CCS_TELEMETRY_STATS)) { ccsMetrics.writeTo(out); - } + } // else just drop these stats, ok for bwc } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/RemoteClusterStatsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/RemoteClusterStatsRequest.java index 47843a91351ee..6c3c5cbb50ece 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/RemoteClusterStatsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/RemoteClusterStatsRequest.java @@ -36,9 +36,9 @@ public ActionRequestValidationException validate() { @Override public void writeTo(StreamOutput out) throws IOException { - assert out.getTransportVersion().onOrAfter(TransportVersions.CCS_REMOTE_TELEMETRY_STATS) + assert out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0) : "RemoteClusterStatsRequest is not supported by the remote cluster"; - if (out.getTransportVersion().before(TransportVersions.CCS_REMOTE_TELEMETRY_STATS)) { + if (out.getTransportVersion().before(TransportVersions.V_8_16_0)) { throw new UnsupportedOperationException("RemoteClusterStatsRequest is not supported by the remote cluster"); } super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/SearchUsageStats.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/SearchUsageStats.java index 0f6c56fd21bd7..a6e80b5efd08c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/SearchUsageStats.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/SearchUsageStats.java @@ -22,8 +22,8 @@ import java.util.Map; import java.util.Objects; -import static org.elasticsearch.TransportVersions.RETRIEVERS_TELEMETRY_ADDED; import static org.elasticsearch.TransportVersions.V_8_12_0; +import static org.elasticsearch.TransportVersions.V_8_16_0; /** * Holds a snapshot of the search usage statistics. @@ -71,7 +71,7 @@ public SearchUsageStats(StreamInput in) throws IOException { this.sections = in.readMap(StreamInput::readLong); this.totalSearchCount = in.readVLong(); this.rescorers = in.getTransportVersion().onOrAfter(V_8_12_0) ? in.readMap(StreamInput::readLong) : Map.of(); - this.retrievers = in.getTransportVersion().onOrAfter(RETRIEVERS_TELEMETRY_ADDED) ? in.readMap(StreamInput::readLong) : Map.of(); + this.retrievers = in.getTransportVersion().onOrAfter(V_8_16_0) ? 
in.readMap(StreamInput::readLong) : Map.of(); } @Override @@ -83,7 +83,7 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(V_8_12_0)) { out.writeMap(rescorers, StreamOutput::writeLong); } - if (out.getTransportVersion().onOrAfter(RETRIEVERS_TELEMETRY_ADDED)) { + if (out.getTransportVersion().onOrAfter(V_8_16_0)) { out.writeMap(retrievers, StreamOutput::writeLong); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java index 97585ea9a1024..2c20daa5d7afb 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java @@ -12,6 +12,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.store.AlreadyClosedException; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.ActionType; @@ -72,8 +73,6 @@ import java.util.function.BooleanSupplier; import java.util.stream.Collectors; -import static org.elasticsearch.TransportVersions.CCS_REMOTE_TELEMETRY_STATS; - /** * Transport action implementing _cluster/stats API. */ @@ -450,7 +449,7 @@ protected void sendItemRequest(String clusterAlias, ActionListener { - if (connection.getTransportVersion().before(CCS_REMOTE_TELEMETRY_STATS)) { + if (connection.getTransportVersion().before(TransportVersions.V_8_16_0)) { responseListener.onResponse(null); } else { remoteClusterClient.execute(connection, TransportRemoteClusterStatsAction.REMOTE_TYPE, remoteRequest, responseListener); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesRequest.java index c28108815ed03..af95787a5fcca 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesRequest.java @@ -8,21 +8,20 @@ */ package org.elasticsearch.action.admin.indices.alias.get; -import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.AliasesRequest; import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.action.support.TransportAction; +import org.elasticsearch.action.support.local.LocalClusterStateRequest; +import org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; -import java.io.IOException; import java.util.Map; -public class GetAliasesRequest extends ActionRequest implements AliasesRequest { +public class GetAliasesRequest extends LocalClusterStateRequest implements AliasesRequest { public static final IndicesOptions DEFAULT_INDICES_OPTIONS = IndicesOptions.strictExpandHidden(); @@ -31,18 +30,20 @@ public class GetAliasesRequest extends ActionRequest implements AliasesRequest { private String[] indices = Strings.EMPTY_ARRAY; 
private IndicesOptions indicesOptions = DEFAULT_INDICES_OPTIONS; + @Deprecated public GetAliasesRequest(String... aliases) { - this.aliases = aliases; - this.originalAliases = aliases; + this(MasterNodeRequest.TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, aliases); } + @Deprecated public GetAliasesRequest() { this(Strings.EMPTY_ARRAY); } - @Override - public void writeTo(StreamOutput out) throws IOException { - TransportAction.localOnly(); + public GetAliasesRequest(TimeValue masterTimeout, String... aliases) { + super(masterTimeout); + this.aliases = aliases; + this.originalAliases = aliases; } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java index 67f1a627ae77f..9e0014b0f512b 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java @@ -10,7 +10,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.TransportLocalClusterStateAction; +import org.elasticsearch.action.support.local.TransportLocalClusterStateAction; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/ResolveIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/ResolveIndexAction.java index 5c5c71bc002b3..f5c100b7884bb 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/ResolveIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/ResolveIndexAction.java @@ -25,6 +25,7 @@ import org.elasticsearch.cluster.metadata.IndexAbstraction; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.ResolvedExpression; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; @@ -565,8 +566,8 @@ static void resolveIndices( if (names.length == 1 && (Metadata.ALL.equals(names[0]) || Regex.isMatchAllPattern(names[0]))) { names = new String[] { "**" }; } - Set<String> resolvedIndexAbstractions = resolver.resolveExpressions(clusterState, indicesOptions, true, names); - for (String s : resolvedIndexAbstractions) { + Set<ResolvedExpression> resolvedIndexAbstractions = resolver.resolveExpressions(clusterState, indicesOptions, true, names); + for (ResolvedExpression s : resolvedIndexAbstractions) { enrichIndexAbstraction(clusterState, s, indices, aliases, dataStreams); } indices.sort(Comparator.comparing(ResolvedIndexAbstraction::getName)); @@ -597,12 +598,12 @@ private static void mergeResults( private static void enrichIndexAbstraction( ClusterState clusterState, - String indexAbstraction, + ResolvedExpression indexAbstraction, List<ResolvedIndex> indices, List<ResolvedAlias> aliases, List<ResolvedDataStream> dataStreams ) { - IndexAbstraction ia = clusterState.metadata().getIndicesLookup().get(indexAbstraction); + IndexAbstraction ia = clusterState.metadata().getIndicesLookup().get(indexAbstraction.resource()); if (ia != null) { switch (ia.getType()) { case CONCRETE_INDEX -> {
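A pattern repeated across these hunks: IndexNameExpressionResolver.resolveExpressions now returns ResolvedExpression objects rather than bare strings, and each consumer unwraps the index, alias, or data stream name through resource() before consulting the metadata lookup. A self-contained sketch of that calling shape, with a hypothetical record standing in for the real inner class:

import java.util.Map;
import java.util.Set;

public class ResolvedExpressionSketch {
    // Hypothetical stand-in for IndexNameExpressionResolver.ResolvedExpression:
    // carries the resolved resource name instead of exposing a plain String.
    record ResolvedExpression(String resource) {}

    public static void main(String[] args) {
        Map<String, String> indicesLookup = Map.of("test-idx", "concrete index", "logs", "alias");
        Set<ResolvedExpression> resolved = Set.of(new ResolvedExpression("test-idx"), new ResolvedExpression("logs"));
        for (ResolvedExpression expression : resolved) {
            // Same unwrapping as enrichIndexAbstraction above: resource() recovers the
            // name that previously flowed through these call sites as a raw String.
            System.out.println(expression.resource() + " -> " + indicesLookup.get(expression.resource()));
        }
    }
}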
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComponentTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComponentTemplateAction.java index c6d990e5a1d62..f729455edcc24 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComponentTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComponentTemplateAction.java @@ -131,8 +131,7 @@ public Response(StreamInput in) throws IOException { } else { rolloverConfiguration = null; } - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0) - && in.getTransportVersion().before(TransportVersions.REMOVE_GLOBAL_RETENTION_FROM_TEMPLATES)) { + if (in.getTransportVersion().between(TransportVersions.V_8_14_0, TransportVersions.V_8_16_0)) { in.readOptionalWriteable(DataStreamGlobalRetention::read); } } @@ -190,8 +189,7 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { out.writeOptionalWriteable(rolloverConfiguration); } - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0) - && out.getTransportVersion().before(TransportVersions.REMOVE_GLOBAL_RETENTION_FROM_TEMPLATES)) { + if (out.getTransportVersion().between(TransportVersions.V_8_14_0, TransportVersions.V_8_16_0)) { out.writeOptionalWriteable(null); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComposableIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComposableIndexTemplateAction.java index a47f89030cc60..67f87476ea6a5 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComposableIndexTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComposableIndexTemplateAction.java @@ -132,8 +132,7 @@ public Response(StreamInput in) throws IOException { } else { rolloverConfiguration = null; } - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0) - && in.getTransportVersion().before(TransportVersions.REMOVE_GLOBAL_RETENTION_FROM_TEMPLATES)) { + if (in.getTransportVersion().between(TransportVersions.V_8_14_0, TransportVersions.V_8_16_0)) { in.readOptionalWriteable(DataStreamGlobalRetention::read); } } @@ -191,8 +190,7 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { out.writeOptionalWriteable(rolloverConfiguration); } - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0) - && out.getTransportVersion().before(TransportVersions.REMOVE_GLOBAL_RETENTION_FROM_TEMPLATES)) { + if (out.getTransportVersion().between(TransportVersions.V_8_14_0, TransportVersions.V_8_16_0)) { out.writeOptionalWriteable(null); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java index 064c24cf4afa3..a521dac60e96a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java @@ -13,6 +13,7 @@ import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.admin.indices.rollover.RolloverConfiguration; 
import org.elasticsearch.cluster.metadata.DataStreamGlobalRetention; +import org.elasticsearch.cluster.metadata.ResettableValue; import org.elasticsearch.cluster.metadata.Template; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -82,8 +83,7 @@ public SimulateIndexTemplateResponse(StreamInput in) throws IOException { rolloverConfiguration = in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X) ? in.readOptionalWriteable(RolloverConfiguration::new) : null; - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0) - && in.getTransportVersion().before(TransportVersions.REMOVE_GLOBAL_RETENTION_FROM_TEMPLATES)) { + if (in.getTransportVersion().between(TransportVersions.V_8_14_0, TransportVersions.V_8_16_0)) { in.readOptionalWriteable(DataStreamGlobalRetention::read); } } @@ -104,8 +104,7 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { out.writeOptionalWriteable(rolloverConfiguration); } - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0) - && out.getTransportVersion().before(TransportVersions.REMOVE_GLOBAL_RETENTION_FROM_TEMPLATES)) { + if (out.getTransportVersion().between(TransportVersions.V_8_14_0, TransportVersions.V_8_16_0)) { out.writeOptionalWriteable(null); } } @@ -113,9 +112,9 @@ public void writeTo(StreamOutput out) throws IOException { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); - if (this.resolvedTemplate != null) { + if (resolvedTemplate != null) { builder.field(TEMPLATE.getPreferredName()); - this.resolvedTemplate.toXContent(builder, params, rolloverConfiguration); + resolvedTemplate.toXContent(builder, ResettableValue.hideResetValues(params), rolloverConfiguration); } if (this.overlappingTemplates != null) { builder.startArray(OVERLAPPING.getPreferredName()); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java index 94d9b87467ea8..5f98852148ed4 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java @@ -61,6 +61,7 @@ import static org.elasticsearch.cluster.metadata.MetadataIndexTemplateService.findConflictingV1Templates; import static org.elasticsearch.cluster.metadata.MetadataIndexTemplateService.findConflictingV2Templates; import static org.elasticsearch.cluster.metadata.MetadataIndexTemplateService.findV2Template; +import static org.elasticsearch.cluster.metadata.MetadataIndexTemplateService.resolveDataStreamOptions; import static org.elasticsearch.cluster.metadata.MetadataIndexTemplateService.resolveLifecycle; import static org.elasticsearch.cluster.metadata.MetadataIndexTemplateService.resolveSettings; @@ -348,7 +349,13 @@ public static Template resolveTemplate( if (template.getDataStreamTemplate() != null && lifecycle == null && isDslOnlyMode) { lifecycle = DataStreamLifecycle.DEFAULT; } - return new Template(settings, mergedMapping, aliasesByName, lifecycle); + return new Template( + settings, + mergedMapping, + aliasesByName, + lifecycle, + resolveDataStreamOptions(simulatedState.metadata(), matchingTemplate) + ); } private 
static IndexLongFieldRange getEventIngestedRange(String indexName, ClusterState simulatedState) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java index 4e9830fe0d14e..e01f364712676 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java @@ -21,6 +21,7 @@ import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.ResolvedExpression; import org.elasticsearch.cluster.routing.GroupShardsIterator; import org.elasticsearch.cluster.routing.ShardIterator; import org.elasticsearch.cluster.routing.ShardRouting; @@ -133,7 +134,7 @@ protected void doExecute(Task task, ValidateQueryRequest request, ActionListener<ValidateQueryResponse> listener) { @Override protected ShardValidateQueryRequest newShardRequest(int numShards, ShardRouting shard, ValidateQueryRequest request) { final ClusterState clusterState = clusterService.state(); - final Set<String> indicesAndAliases = indexNameExpressionResolver.resolveExpressions(clusterState, request.indices()); + final Set<ResolvedExpression> indicesAndAliases = indexNameExpressionResolver.resolveExpressions(clusterState, request.indices()); final AliasFilter aliasFilter = searchService.buildAliasFilter(clusterState, shard.getIndexName(), indicesAndAliases); return new ShardValidateQueryRequest(shard.shardId(), aliasFilter, request); } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java index d5931c85bb2e1..1ff970de7525e 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java @@ -200,7 +200,7 @@ public Failure(StreamInput in) throws IOException { seqNo = in.readZLong(); term = in.readVLong(); aborted = in.readBoolean(); - if (in.getTransportVersion().onOrAfter(TransportVersions.FAILURE_STORE_STATUS_IN_INDEX_RESPONSE)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { failureStoreStatus = IndexDocFailureStoreStatus.read(in); } else { failureStoreStatus = IndexDocFailureStoreStatus.NOT_APPLICABLE_OR_UNKNOWN; @@ -218,7 +218,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeZLong(seqNo); out.writeVLong(term); out.writeBoolean(aborted); - if (out.getTransportVersion().onOrAfter(TransportVersions.FAILURE_STORE_STATUS_IN_INDEX_RESPONSE)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { failureStoreStatus.writeTo(out); } } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java index ad1fda2534fab..4df228240add5 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java @@ -314,8 +314,9 @@ private Map<ShardId, List<BulkItemRequest>> groupRequestsByShards( continue; } IndexRouting indexRouting = concreteIndices.routing(concreteIndex); - docWriteRequest.process(indexRouting); + docWriteRequest.preRoutingProcess(indexRouting);
int shardId = docWriteRequest.route(indexRouting); + docWriteRequest.postRoutingProcess(indexRouting); List<BulkItemRequest> shardRequests = requestsByShard.computeIfAbsent( new ShardId(concreteIndex, shardId), shard -> new ArrayList<>() diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java index f62b2f48fa2fd..91caebc420ffb 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java @@ -98,7 +98,7 @@ public BulkRequest(StreamInput in) throws IOException { for (DocWriteRequest<?> request : requests) { indices.add(Objects.requireNonNull(request.index(), "request index must not be null")); } - if (in.getTransportVersion().onOrAfter(TransportVersions.BULK_INCREMENTAL_STATE)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { incrementalState = new BulkRequest.IncrementalState(in); } else { incrementalState = BulkRequest.IncrementalState.EMPTY; @@ -454,7 +454,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeCollection(requests, DocWriteRequest::writeDocumentRequest); refreshPolicy.writeTo(out); out.writeTimeValue(timeout); - if (out.getTransportVersion().onOrAfter(TransportVersions.BULK_INCREMENTAL_STATE)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { incrementalState.writeTo(out); } } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkResponse.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkResponse.java index ec7a08007de93..12d3aa67ca9bb 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkResponse.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkResponse.java @@ -46,7 +46,7 @@ public BulkResponse(StreamInput in) throws IOException { responses = in.readArray(BulkItemResponse::new, BulkItemResponse[]::new); tookInMillis = in.readVLong(); ingestTookInMillis = in.readZLong(); - if (in.getTransportVersion().onOrAfter(TransportVersions.BULK_INCREMENTAL_STATE)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { incrementalState = new BulkRequest.IncrementalState(in); } else { incrementalState = BulkRequest.IncrementalState.EMPTY; @@ -151,7 +151,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeArray(responses); out.writeVLong(tookInMillis); out.writeZLong(ingestTookInMillis); - if (out.getTransportVersion().onOrAfter(TransportVersions.BULK_INCREMENTAL_STATE)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { incrementalState.writeTo(out); } } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/IndexDocFailureStoreStatus.java b/server/src/main/java/org/elasticsearch/action/bulk/IndexDocFailureStoreStatus.java index cb83d693a415b..7367dfa1d53fd 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/IndexDocFailureStoreStatus.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/IndexDocFailureStoreStatus.java @@ -124,7 +124,7 @@ public ExceptionWithFailureStoreStatus(BulkItemResponse.Failure failure) { public ExceptionWithFailureStoreStatus(StreamInput in) throws IOException { super(in); - if (in.getTransportVersion().onOrAfter(TransportVersions.FAILURE_STORE_STATUS_IN_INDEX_RESPONSE)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { failureStoreStatus = IndexDocFailureStoreStatus.fromId(in.readByte()); } else { failureStoreStatus = NOT_APPLICABLE_OR_UNKNOWN; @@ -134,7
+134,7 @@ public ExceptionWithFailureStoreStatus(StreamInput in) throws IOException { @Override protected void writeTo(StreamOutput out, Writer<Throwable> nestedExceptionsWriter) throws IOException { super.writeTo(out, nestedExceptionsWriter); - if (out.getTransportVersion().onOrAfter(TransportVersions.FAILURE_STORE_STATUS_IN_INDEX_RESPONSE)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { out.writeByte(failureStoreStatus.getId()); } } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/SimulateBulkRequest.java b/server/src/main/java/org/elasticsearch/action/bulk/SimulateBulkRequest.java index cc7fd431d8097..290d342e9dc12 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/SimulateBulkRequest.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/SimulateBulkRequest.java @@ -135,14 +135,11 @@ public SimulateBulkRequest( public SimulateBulkRequest(StreamInput in) throws IOException { super(in); this.pipelineSubstitutions = (Map<String, Map<String, Object>>) in.readGenericValue(); - if (in.getTransportVersion().onOrAfter(TransportVersions.SIMULATE_COMPONENT_TEMPLATES_SUBSTITUTIONS)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { this.componentTemplateSubstitutions = (Map<String, Map<String, Object>>) in.readGenericValue(); - } else { - componentTemplateSubstitutions = Map.of(); - } - if (in.getTransportVersion().onOrAfter(TransportVersions.SIMULATE_INDEX_TEMPLATES_SUBSTITUTIONS)) { this.indexTemplateSubstitutions = (Map<String, Map<String, Object>>) in.readGenericValue(); } else { + componentTemplateSubstitutions = Map.of(); indexTemplateSubstitutions = Map.of(); } if (in.getTransportVersion().onOrAfter(TransportVersions.SIMULATE_MAPPING_ADDITION)) { @@ -156,10 +153,8 @@ public SimulateBulkRequest(StreamInput in) throws IOException { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeGenericValue(pipelineSubstitutions); - if (out.getTransportVersion().onOrAfter(TransportVersions.SIMULATE_COMPONENT_TEMPLATES_SUBSTITUTIONS)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { out.writeGenericValue(componentTemplateSubstitutions); - } - if (out.getTransportVersion().onOrAfter(TransportVersions.SIMULATE_INDEX_TEMPLATES_SUBSTITUTIONS)) { out.writeGenericValue(indexTemplateSubstitutions); } if (out.getTransportVersion().onOrAfter(TransportVersions.SIMULATE_MAPPING_ADDITION)) { diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java index cef68324e2a45..e2c73349b93ec 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java @@ -35,6 +35,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import org.elasticsearch.cluster.metadata.DataStream; +import org.elasticsearch.cluster.metadata.DataStreamOptions; import org.elasticsearch.cluster.metadata.IndexAbstraction; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.Metadata; @@ -42,6 +43,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.util.concurrent.AtomicArray; +import org.elasticsearch.core.Nullable; import org.elasticsearch.features.FeatureService; import org.elasticsearch.index.IndexingPressure; import org.elasticsearch.index.VersionType; @@ -638,12
+640,16 @@ private static Boolean resolveFailureStoreFromMetadata(String indexName, Metadat } /** - * Determines if an index name is associated with an index template that has a data stream failure store enabled. + * Determines if an index name is associated with an index template that has a data stream failure store enabled. Since the failure + * store is a data stream feature, the method returns true/false only for data stream templates, otherwise null. * @param indexName The index name to check. * @param metadata Cluster state metadata. - * @return true if the given index name corresponds to an index template with a data stream failure store enabled. + * @return true if the associated index template has the failure store enabled, false if the failure store is disabled or not + * specified, and null if the template is not a data stream template. + * Visible for testing */ - private static Boolean resolveFailureStoreFromTemplate(String indexName, Metadata metadata) { + @Nullable + static Boolean resolveFailureStoreFromTemplate(String indexName, Metadata metadata) { if (indexName == null) { return null; } @@ -656,7 +662,11 @@ private static Boolean resolveFailureStoreFromTemplate(String indexName, Metadat ComposableIndexTemplate composableIndexTemplate = metadata.templatesV2().get(template); if (composableIndexTemplate.getDataStreamTemplate() != null) { // Check if the data stream has the failure store enabled - return composableIndexTemplate.getDataStreamTemplate().hasFailureStore(); + DataStreamOptions dataStreamOptions = MetadataIndexTemplateService.resolveDataStreamOptions( + composableIndexTemplate, + metadata.componentTemplates() + ).mapAndGet(DataStreamOptions.Template::toDataStreamOptions); + return dataStreamOptions != null && dataStreamOptions.isFailureStoreEnabled(); } } diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java index c1cf0fa7aab42..93c40ad18cc8a 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java +++ b/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java @@ -112,7 +112,7 @@ public Request(StreamInput in) throws IOException { } else { this.includeDefaults = false; } - if (in.getTransportVersion().onOrAfter(TransportVersions.GET_DATA_STREAMS_VERBOSE)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { this.verbose = in.readBoolean(); } else { this.verbose = false; @@ -127,7 +127,7 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { out.writeBoolean(includeDefaults); } - if (out.getTransportVersion().onOrAfter(TransportVersions.GET_DATA_STREAMS_VERBOSE)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { out.writeBoolean(verbose); } } @@ -275,7 +275,7 @@ public DataStreamInfo( in.getTransportVersion().onOrAfter(TransportVersions.V_8_3_0) ? in.readOptionalWriteable(TimeSeries::new) : null, in.getTransportVersion().onOrAfter(V_8_11_X) ? in.readMap(Index::new, IndexProperties::new) : Map.of(), in.getTransportVersion().onOrAfter(V_8_11_X) ? in.readBoolean() : true, - in.getTransportVersion().onOrAfter(TransportVersions.GET_DATA_STREAMS_VERBOSE) ? in.readOptionalVLong() : null + in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0) ?
in.readOptionalVLong() : null ); } @@ -328,7 +328,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeMap(indexSettingsValues); out.writeBoolean(templatePreferIlmValue); } - if (out.getTransportVersion().onOrAfter(TransportVersions.GET_DATA_STREAMS_VERBOSE)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { out.writeOptionalVLong(maximumTimestamp); } } diff --git a/server/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java b/server/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java index 631336e080a6a..1a5495412f605 100644 --- a/server/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java +++ b/server/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java @@ -237,11 +237,6 @@ public boolean isRequireDataStream() { return false; } - @Override - public void process(IndexRouting indexRouting) { - // Nothing to do - } - @Override public int route(IndexRouting indexRouting) { return indexRouting.deleteShard(id, routing); diff --git a/server/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java b/server/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java index 9c82d032014f2..84c6df7b8a66f 100644 --- a/server/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java +++ b/server/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java @@ -18,6 +18,7 @@ import org.elasticsearch.action.support.single.shard.TransportSingleShardAction; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.ResolvedExpression; import org.elasticsearch.cluster.routing.ShardIterator; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.Writeable; @@ -109,7 +110,7 @@ protected boolean resolveIndex(ExplainRequest request) { @Override protected void resolveRequest(ClusterState state, InternalRequest request) { - final Set<String> indicesAndAliases = indexNameExpressionResolver.resolveExpressions(state, request.request().index()); + final Set<ResolvedExpression> indicesAndAliases = indexNameExpressionResolver.resolveExpressions(state, request.request().index()); final AliasFilter aliasFilter = searchService.buildAliasFilter(state, request.concreteIndex(), indicesAndAliases); request.request().filteringAlias(aliasFilter); } diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesIndexResponse.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesIndexResponse.java index d16100a64713e..6f510ad26f5ec 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesIndexResponse.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesIndexResponse.java @@ -62,7 +62,7 @@ public FieldCapabilitiesIndexResponse( } else { this.indexMappingHash = null; } - if (in.getTransportVersion().onOrAfter(TransportVersions.FIELD_CAPS_RESPONSE_INDEX_MODE)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { this.indexMode = IndexMode.readFrom(in); } else { this.indexMode = IndexMode.STANDARD; @@ -77,7 +77,7 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(MAPPING_HASH_VERSION)) { out.writeOptionalString(indexMappingHash); } - if (out.getTransportVersion().onOrAfter(TransportVersions.FIELD_CAPS_RESPONSE_INDEX_MODE)) { + if
(out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { IndexMode.writeTo(indexMode, out); } } @@ -105,7 +105,7 @@ static List readList(StreamInput input) throws I private static void collectCompressedResponses(StreamInput input, int groups, ArrayList responses) throws IOException { final CompressedGroup[] compressedGroups = new CompressedGroup[groups]; - final boolean readIndexMode = input.getTransportVersion().onOrAfter(TransportVersions.FIELD_CAPS_RESPONSE_INDEX_MODE); + final boolean readIndexMode = input.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0); for (int i = 0; i < groups; i++) { final String[] indices = input.readStringArray(); final IndexMode indexMode = readIndexMode ? IndexMode.readFrom(input) : IndexMode.STANDARD; @@ -179,7 +179,7 @@ private static void writeCompressedResponses(StreamOutput output, Map { o.writeCollection(fieldCapabilitiesIndexResponses, (oo, r) -> oo.writeString(r.indexName)); var first = fieldCapabilitiesIndexResponses.get(0); - if (output.getTransportVersion().onOrAfter(TransportVersions.FIELD_CAPS_RESPONSE_INDEX_MODE)) { + if (output.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { IndexMode.writeTo(first.indexMode, o); } o.writeString(first.indexMappingHash); diff --git a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java index c0811e7424b0d..d5b8b657bd14e 100644 --- a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java @@ -205,10 +205,8 @@ public IndexRequest(@Nullable ShardId shardId, StreamInput in) throws IOExceptio if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_13_0)) { in.readZLong(); // obsolete normalisedBytesParsed } - if (in.getTransportVersion().onOrAfter(TransportVersions.INDEX_REQUEST_UPDATE_BY_SCRIPT_ORIGIN)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { in.readBoolean(); // obsolete originatesFromUpdateByScript - } - if (in.getTransportVersion().onOrAfter(TransportVersions.INDEX_REQUEST_UPDATE_BY_DOC_ORIGIN)) { in.readBoolean(); // obsolete originatesFromUpdateByDoc } } @@ -687,8 +685,13 @@ public VersionType versionType() { } @Override - public void process(IndexRouting indexRouting) { - indexRouting.process(this); + public void preRoutingProcess(IndexRouting indexRouting) { + indexRouting.preProcess(this); + } + + @Override + public void postRoutingProcess(IndexRouting indexRouting) { + indexRouting.postProcess(this); } /** @@ -789,10 +792,8 @@ private void writeBody(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_13_0)) { out.writeZLong(-1); // obsolete normalisedBytesParsed } - if (out.getTransportVersion().onOrAfter(TransportVersions.INDEX_REQUEST_UPDATE_BY_SCRIPT_ORIGIN)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { out.writeBoolean(false); // obsolete originatesFromUpdateByScript - } - if (out.getTransportVersion().onOrAfter(TransportVersions.INDEX_REQUEST_UPDATE_BY_DOC_ORIGIN)) { out.writeBoolean(false); // obsolete originatesFromUpdateByDoc } } @@ -889,7 +890,7 @@ public Index getConcreteWriteIndex(IndexAbstraction ia, Metadata metadata) { @Override public int route(IndexRouting indexRouting) { - return indexRouting.indexShard(id, routing, contentType, source, this::routing); + return indexRouting.indexShard(id, routing, contentType, source); } public IndexRequest 
setRequireAlias(boolean requireAlias) { diff --git a/server/src/main/java/org/elasticsearch/action/index/IndexResponse.java b/server/src/main/java/org/elasticsearch/action/index/IndexResponse.java index 8d1bdf227e24d..7c45de8905174 100644 --- a/server/src/main/java/org/elasticsearch/action/index/IndexResponse.java +++ b/server/src/main/java/org/elasticsearch/action/index/IndexResponse.java @@ -46,7 +46,7 @@ public IndexResponse(ShardId shardId, StreamInput in) throws IOException { } else { executedPipelines = null; } - if (in.getTransportVersion().onOrAfter(TransportVersions.FAILURE_STORE_STATUS_IN_INDEX_RESPONSE)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { failureStoreStatus = IndexDocFailureStoreStatus.read(in); } else { failureStoreStatus = IndexDocFailureStoreStatus.NOT_APPLICABLE_OR_UNKNOWN; @@ -60,7 +60,7 @@ public IndexResponse(StreamInput in) throws IOException { } else { executedPipelines = null; } - if (in.getTransportVersion().onOrAfter(TransportVersions.FAILURE_STORE_STATUS_IN_INDEX_RESPONSE)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { failureStoreStatus = IndexDocFailureStoreStatus.read(in); } else { failureStoreStatus = IndexDocFailureStoreStatus.NOT_APPLICABLE_OR_UNKNOWN; @@ -126,7 +126,7 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_12_0)) { out.writeOptionalCollection(executedPipelines, StreamOutput::writeString); } - if (out.getTransportVersion().onOrAfter(TransportVersions.FAILURE_STORE_STATUS_IN_INDEX_RESPONSE)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { failureStoreStatus.writeTo(out); } } @@ -137,7 +137,7 @@ public void writeThin(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_12_0)) { out.writeOptionalCollection(executedPipelines, StreamOutput::writeString); } - if (out.getTransportVersion().onOrAfter(TransportVersions.FAILURE_STORE_STATUS_IN_INDEX_RESPONSE)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { failureStoreStatus.writeTo(out); } } diff --git a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java index 09fb70fb06ba4..800193e258dba 100644 --- a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java @@ -739,7 +739,7 @@ void sendReleaseSearchContext(ShardSearchContextId contextId, Transport.Connecti * @see #onShardFailure(int, SearchShardTarget, Exception) * @see #onShardResult(SearchPhaseResult, SearchShardIterator) */ - final void onPhaseDone() { // as a tribute to @kimchy aka. finishHim() + private void onPhaseDone() { // as a tribute to @kimchy aka. finishHim() executeNextPhase(this, this::getNextPhase); } @@ -762,13 +762,6 @@ public final void execute(Runnable command) { executor.execute(command); } - /** - * Notifies the top-level listener of the provided exception - */ - public void onFailure(Exception e) { - listener.onFailure(e); - } - /** * Builds an request for the initial search phase. 
* diff --git a/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java b/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java index 8feed2aea00b0..e8d94c32bdcc7 100644 --- a/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java @@ -102,7 +102,7 @@ private void doRun() { for (InnerHitBuilder innerHitBuilder : innerHitBuilders) { MultiSearchResponse.Item item = it.next(); if (item.isFailure()) { - context.onPhaseFailure(this, "failed to expand hits", item.getFailure()); + phaseFailure(item.getFailure()); return; } SearchHits innerHits = item.getResponse().getHits(); @@ -119,7 +119,11 @@ private void doRun() { } } onPhaseDone(); - }, context::onFailure)); + }, this::phaseFailure)); + } + + private void phaseFailure(Exception ex) { + context.onPhaseFailure(this, "failed to expand hits", ex); } private static SearchSourceBuilder buildExpandSearchSourceBuilder(InnerHitBuilder options, CollapseBuilder innerCollapseBuilder) { diff --git a/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeRequest.java b/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeRequest.java index 969ba2ad983ce..d68e2ce1b02b7 100644 --- a/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeRequest.java +++ b/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeRequest.java @@ -63,7 +63,7 @@ public OpenPointInTimeRequest(StreamInput in) throws IOException { if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_12_0)) { this.indexFilter = in.readOptionalNamedWriteable(QueryBuilder.class); } - if (in.getTransportVersion().onOrAfter(TransportVersions.ALLOW_PARTIAL_SEARCH_RESULTS_IN_PIT)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { this.allowPartialSearchResults = in.readBoolean(); } } @@ -82,7 +82,7 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_12_0)) { out.writeOptionalWriteable(indexFilter); } - if (out.getTransportVersion().onOrAfter(TransportVersions.ALLOW_PARTIAL_SEARCH_RESULTS_IN_PIT)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { out.writeBoolean(allowPartialSearchResults); } else if (allowPartialSearchResults) { throw new IOException("[allow_partial_search_results] is not supported on nodes with version " + out.getTransportVersion()); diff --git a/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeResponse.java b/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeResponse.java index 3c830c8ed9dc1..b3ffc564d848c 100644 --- a/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeResponse.java +++ b/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeResponse.java @@ -47,7 +47,7 @@ public OpenPointInTimeResponse( @Override public void writeTo(StreamOutput out) throws IOException { out.writeBytesReference(pointInTimeId); - if (out.getTransportVersion().onOrAfter(TransportVersions.ALLOW_PARTIAL_SEARCH_RESULTS_IN_PIT)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { out.writeVInt(totalShards); out.writeVInt(successfulShards); out.writeVInt(failedShards); diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchContextId.java b/server/src/main/java/org/elasticsearch/action/search/SearchContextId.java index ca810bb88653f..c2f1510341fb0 100644 --- 
a/server/src/main/java/org/elasticsearch/action/search/SearchContextId.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchContextId.java @@ -63,14 +63,14 @@ public static BytesReference encode( List<SearchPhaseResult> searchPhaseResults, Map<String, AliasFilter> aliasFilter, TransportVersion version, ShardSearchFailure[] shardFailures ) { - assert shardFailures.length == 0 || version.onOrAfter(TransportVersions.ALLOW_PARTIAL_SEARCH_RESULTS_IN_PIT) + assert shardFailures.length == 0 || version.onOrAfter(TransportVersions.V_8_16_0) : "[allow_partial_search_results] cannot be enabled on a cluster that has not been fully upgraded to version [" - + TransportVersions.ALLOW_PARTIAL_SEARCH_RESULTS_IN_PIT + + TransportVersions.V_8_16_0.toReleaseVersion() + "] or higher."; try (var out = new BytesStreamOutput()) { out.setTransportVersion(version); TransportVersion.writeVersion(version, out); - boolean allowNullContextId = out.getTransportVersion().onOrAfter(TransportVersions.ALLOW_PARTIAL_SEARCH_RESULTS_IN_PIT); + boolean allowNullContextId = out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0); int shardSize = searchPhaseResults.size() + (allowNullContextId ? shardFailures.length : 0); out.writeVInt(shardSize); for (var searchResult : searchPhaseResults) { diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchContextIdForNode.java b/server/src/main/java/org/elasticsearch/action/search/SearchContextIdForNode.java index 7509a7b0fed04..f91a9d09f4bb4 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchContextIdForNode.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchContextIdForNode.java @@ -37,7 +37,7 @@ public final class SearchContextIdForNode implements Writeable { } SearchContextIdForNode(StreamInput in) throws IOException { - boolean allowNull = in.getTransportVersion().onOrAfter(TransportVersions.ALLOW_PARTIAL_SEARCH_RESULTS_IN_PIT); + boolean allowNull = in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0); this.node = allowNull ? in.readOptionalString() : in.readString(); this.clusterAlias = in.readOptionalString(); this.searchContextId = allowNull ? in.readOptionalWriteable(ShardSearchContextId::new) : new ShardSearchContextId(in); @@ -45,7 +45,7 @@ public final class SearchContextIdForNode implements Writeable { @Override public void writeTo(StreamOutput out) throws IOException { - boolean allowNull = out.getTransportVersion().onOrAfter(TransportVersions.ALLOW_PARTIAL_SEARCH_RESULTS_IN_PIT); + boolean allowNull = out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0); if (allowNull) { out.writeOptionalString(node); } else { @@ -53,7 +53,7 @@ public void writeTo(StreamOutput out) throws IOException { // We should never set a null node if the cluster is not fully upgraded to a version that can handle it. throw new IOException( "Cannot write null node value to a node in version " - + out.getTransportVersion() + + out.getTransportVersion().toReleaseVersion() + ". The target node must be specified to retrieve the ShardSearchContextId." ); } @@ -67,7 +67,7 @@ public void writeTo(StreamOutput out) throws IOException { // We should never set a null search context id if the cluster is not fully upgraded to a version that can handle it. throw new IOException( "Cannot write null search context ID to a node in version " - + out.getTransportVersion() + + out.getTransportVersion().toReleaseVersion() + ". A valid search context ID is required to identify the shard's search context in this version."
); } diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java index 9e60eedbad6a2..36ca0fba94372 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java @@ -104,8 +104,7 @@ public TransportOpenPointInTimeAction( protected void doExecute(Task task, OpenPointInTimeRequest request, ActionListener<OpenPointInTimeResponse> listener) { final ClusterState clusterState = clusterService.state(); // Check if all the nodes in this cluster know about the service - if (request.allowPartialSearchResults() - && clusterState.getMinTransportVersion().before(TransportVersions.ALLOW_PARTIAL_SEARCH_RESULTS_IN_PIT)) { + if (request.allowPartialSearchResults() && clusterState.getMinTransportVersion().before(TransportVersions.V_8_16_0)) { listener.onFailure( new ElasticsearchStatusException( format( diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java index 5d1fb46a53cef..ae27406bf396d 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java @@ -38,6 +38,7 @@ import org.elasticsearch.cluster.metadata.IndexAbstraction; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.ResolvedExpression; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.GroupShardsIterator; @@ -111,6 +112,7 @@ import java.util.function.BiFunction; import java.util.function.Function; import java.util.function.LongSupplier; +import java.util.stream.Collectors; import static org.elasticsearch.action.search.SearchType.DFS_QUERY_THEN_FETCH; import static org.elasticsearch.action.search.SearchType.QUERY_THEN_FETCH; @@ -207,7 +209,7 @@ public TransportSearchAction( private Map<String, OriginalIndices> buildPerIndexOriginalIndices( ClusterState clusterState, - Set<String> indicesAndAliases, + Set<ResolvedExpression> indicesAndAliases, String[] indices, IndicesOptions indicesOptions ) { @@ -215,6 +217,9 @@ private Map<String, OriginalIndices> buildPerIndexOriginalIndices( var blocks = clusterState.blocks(); // optimization: mostly we do not have any blocks so there's no point in the expensive per-index checking boolean hasBlocks = blocks.global().isEmpty() == false || blocks.indices().isEmpty() == false; + // Get a distinct set of index abstraction names present from the resolved expressions to help with the reverse resolution from + // concrete index to the expression that produced it.
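+ // Each ResolvedExpression carries the name of the resource it resolved to (ResolvedExpression::resource); only those names are needed for the contains() checks below.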
+ Set<String> indicesAndAliasesResources = indicesAndAliases.stream().map(ResolvedExpression::resource).collect(Collectors.toSet()); for (String index : indices) { if (hasBlocks) { blocks.indexBlockedRaiseException(ClusterBlockLevel.READ, index); } @@ -231,8 +236,8 @@ private Map<String, OriginalIndices> buildPerIndexOriginalIndices( String[] finalIndices = Strings.EMPTY_ARRAY; if (aliases == null || aliases.length == 0 - || indicesAndAliases.contains(index) - || hasDataStreamRef(clusterState, indicesAndAliases, index)) { + || indicesAndAliasesResources.contains(index) + || hasDataStreamRef(clusterState, indicesAndAliasesResources, index)) { finalIndices = new String[] { index }; } if (aliases != null) { @@ -251,7 +256,11 @@ private static boolean hasDataStreamRef(ClusterState clusterState, Set<String> i return indicesAndAliases.contains(ret.getParentDataStream().getName()); } - Map<String, AliasFilter> buildIndexAliasFilters(ClusterState clusterState, Set<String> indicesAndAliases, Index[] concreteIndices) { + Map<String, AliasFilter> buildIndexAliasFilters( + ClusterState clusterState, + Set<ResolvedExpression> indicesAndAliases, + Index[] concreteIndices + ) { final Map<String, AliasFilter> aliasFilterMap = new HashMap<>(); for (Index index : concreteIndices) { clusterState.blocks().indexBlockedRaiseException(ClusterBlockLevel.READ, index.getName()); @@ -1236,7 +1245,10 @@ private void executeSearch( } else { final Index[] indices = resolvedIndices.getConcreteLocalIndices(); concreteLocalIndices = Arrays.stream(indices).map(Index::getName).toArray(String[]::new); - final Set<String> indicesAndAliases = indexNameExpressionResolver.resolveExpressions(clusterState, searchRequest.indices()); + final Set<ResolvedExpression> indicesAndAliases = indexNameExpressionResolver.resolveExpressions( + clusterState, + searchRequest.indices() + ); aliasFilter = buildIndexAliasFilters(clusterState, indicesAndAliases, indices); aliasFilter.putAll(remoteAliasMap); localShardIterators = getLocalShardsIterator( @@ -1835,7 +1847,7 @@ List<SearchShardIterator> getLocalShardsIterator( ClusterState clusterState, SearchRequest searchRequest, String clusterAlias, - Set<String> indicesAndAliases, + Set<ResolvedExpression> indicesAndAliases, String[] concreteIndices ) { var routingMap = indexNameExpressionResolver.resolveSearchRouting(clusterState, searchRequest.routing(), searchRequest.indices()); diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchShardsAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchShardsAction.java index d8b57972d604f..614a3e9cf22ae 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchShardsAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchShardsAction.java @@ -17,6 +17,7 @@ import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.ResolvedExpression; import org.elasticsearch.cluster.routing.GroupShardsIterator; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.index.Index; @@ -127,7 +128,10 @@ public void searchShards(Task task, SearchShardsRequest searchShardsRequest, Act searchService.getRewriteContext(timeProvider::absoluteStartMillis, resolvedIndices, null), listener.delegateFailureAndWrap((delegate, searchRequest) -> { Index[] concreteIndices = resolvedIndices.getConcreteLocalIndices(); - final Set<String> indicesAndAliases = indexNameExpressionResolver.resolveExpressions(clusterState, searchRequest.indices()); + final Set<ResolvedExpression> indicesAndAliases =
indexNameExpressionResolver.resolveExpressions( + clusterState, + searchRequest.indices() + ); final Map<String, AliasFilter> aliasFilters = transportSearchAction.buildIndexAliasFilters( clusterState, indicesAndAliases, diff --git a/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java b/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java index 85889d8398cb1..ebbd47336e3da 100644 --- a/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java +++ b/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java @@ -982,12 +982,11 @@ public void writeIndicesOptions(StreamOutput out) throws IOException { states.add(WildcardStates.HIDDEN); } out.writeEnumSet(states); - if (out.getTransportVersion() .between(TransportVersions.V_8_14_0, TransportVersions.CONVERT_FAILURE_STORE_OPTIONS_TO_SELECTOR_OPTIONS_INTERNALLY)) { + if (out.getTransportVersion().between(TransportVersions.V_8_14_0, TransportVersions.V_8_16_0)) { out.writeBoolean(includeRegularIndices()); out.writeBoolean(includeFailureIndices()); } - if (out.getTransportVersion().onOrAfter(TransportVersions.CONVERT_FAILURE_STORE_OPTIONS_TO_SELECTOR_OPTIONS_INTERNALLY)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { selectorOptions.writeTo(out); } } @@ -1010,8 +1009,7 @@ public static IndicesOptions readIndicesOptions(StreamInput in) throws IOExcepti .ignoreThrottled(options.contains(Option.IGNORE_THROTTLED)) .build(); SelectorOptions selectorOptions = SelectorOptions.DEFAULT; - if (in.getTransportVersion() .between(TransportVersions.V_8_14_0, TransportVersions.CONVERT_FAILURE_STORE_OPTIONS_TO_SELECTOR_OPTIONS_INTERNALLY)) { + if (in.getTransportVersion().between(TransportVersions.V_8_14_0, TransportVersions.V_8_16_0)) { // Reading from an older node, which will be sending two booleans that we must read out and ignore. var includeData = in.readBoolean(); var includeFailures = in.readBoolean(); @@ -1023,7 +1021,7 @@ public static IndicesOptions readIndicesOptions(StreamInput in) throws IOExcepti selectorOptions = SelectorOptions.FAILURES; } } - if (in.getTransportVersion().onOrAfter(TransportVersions.CONVERT_FAILURE_STORE_OPTIONS_TO_SELECTOR_OPTIONS_INTERNALLY)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { selectorOptions = SelectorOptions.read(in); } return new IndicesOptions( diff --git a/server/src/main/java/org/elasticsearch/action/support/TransportLocalClusterStateAction.java b/server/src/main/java/org/elasticsearch/action/support/TransportLocalClusterStateAction.java deleted file mode 100644 index d48dc74295bcb..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/support/TransportLocalClusterStateAction.java +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1".
- */ - -package org.elasticsearch.action.support; - -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ActionRequest; -import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.action.ActionRunnable; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.block.ClusterBlockException; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.util.concurrent.EsExecutors; -import org.elasticsearch.tasks.Task; -import org.elasticsearch.tasks.TaskManager; - -import java.util.concurrent.Executor; - -/** - * Analogue of {@link org.elasticsearch.action.support.master.TransportMasterNodeReadAction} except that it runs on the local node rather - * than delegating to the master. - */ -public abstract class TransportLocalClusterStateAction extends - TransportAction { - - protected final ClusterService clusterService; - protected final Executor executor; - - protected TransportLocalClusterStateAction( - String actionName, - ActionFilters actionFilters, - TaskManager taskManager, - ClusterService clusterService, - Executor executor - ) { - // TODO replace DIRECT_EXECUTOR_SERVICE when removing workaround for https://github.com/elastic/elasticsearch/issues/97916 - super(actionName, actionFilters, taskManager, EsExecutors.DIRECT_EXECUTOR_SERVICE); - this.clusterService = clusterService; - this.executor = executor; - } - - protected abstract ClusterBlockException checkBlock(Request request, ClusterState state); - - @Override - protected final void doExecute(Task task, Request request, ActionListener listener) { - final var state = clusterService.state(); - final var clusterBlockException = checkBlock(request, state); - if (clusterBlockException != null) { - throw clusterBlockException; - } - - // Workaround for https://github.com/elastic/elasticsearch/issues/97916 - TODO remove this when we can - executor.execute(ActionRunnable.wrap(listener, l -> localClusterStateOperation(task, request, state, l))); - } - - protected abstract void localClusterStateOperation(Task task, Request request, ClusterState state, ActionListener listener) - throws Exception; -} diff --git a/server/src/main/java/org/elasticsearch/action/support/local/LocalClusterStateRequest.java b/server/src/main/java/org/elasticsearch/action/support/local/LocalClusterStateRequest.java new file mode 100644 index 0000000000000..dfbcb21c2a959 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/support/local/LocalClusterStateRequest.java @@ -0,0 +1,43 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.action.support.local; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.support.TransportAction; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.TimeValue; + +import java.io.IOException; +import java.util.Objects; + +/** + * A base request for actions that are executed locally on the node that receives the request. 
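+ * Instances of these requests never leave the local node: {@code writeTo} delegates to {@code TransportAction#localOnly}, so attempting to serialize one fails.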
+ */ +public abstract class LocalClusterStateRequest extends ActionRequest { + + /** + * The timeout for waiting until the cluster is unblocked. + * We use the name masterTimeout to be consistent with the master node actions. + */ + private final TimeValue masterTimeout; + + protected LocalClusterStateRequest(TimeValue masterTimeout) { + this.masterTimeout = Objects.requireNonNull(masterTimeout); + } + + @Override + public final void writeTo(StreamOutput out) throws IOException { + TransportAction.localOnly(); + } + + public TimeValue masterTimeout() { + return masterTimeout; + } +} diff --git a/server/src/main/java/org/elasticsearch/action/support/local/TransportLocalClusterStateAction.java b/server/src/main/java/org/elasticsearch/action/support/local/TransportLocalClusterStateAction.java new file mode 100644 index 0000000000000..66f94050c9826 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/support/local/TransportLocalClusterStateAction.java @@ -0,0 +1,128 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.action.support.local; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchTimeoutException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ActionRunnable; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.TransportAction; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateObserver; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.node.NodeClosedException; +import org.elasticsearch.tasks.CancellableTask; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.tasks.TaskManager; + +import java.util.concurrent.Executor; + +import static org.elasticsearch.common.Strings.format; + +/** + * Analogue of {@link org.elasticsearch.action.support.master.TransportMasterNodeReadAction} except that it runs on the local node rather + * than delegating to the master. 
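+ * Subclasses implement {@code checkBlock} and {@code localClusterStateOperation}; when a retryable cluster block is in the way, execution waits for the cluster to unblock via a {@code ClusterStateObserver} bounded by the request's master timeout.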
+ */ +public abstract class TransportLocalClusterStateAction<Request extends LocalClusterStateRequest, Response extends ActionResponse> extends + TransportAction<Request, Response> { + + private static final Logger logger = LogManager.getLogger(TransportLocalClusterStateAction.class); + + protected final ClusterService clusterService; + protected final Executor executor; + + protected TransportLocalClusterStateAction( + String actionName, + ActionFilters actionFilters, + TaskManager taskManager, + ClusterService clusterService, + Executor executor + ) { + // TODO replace DIRECT_EXECUTOR_SERVICE when removing workaround for https://github.com/elastic/elasticsearch/issues/97916 + super(actionName, actionFilters, taskManager, EsExecutors.DIRECT_EXECUTOR_SERVICE); + this.clusterService = clusterService; + this.executor = executor; + } + + protected abstract ClusterBlockException checkBlock(Request request, ClusterState state); + + protected abstract void localClusterStateOperation(Task task, Request request, ClusterState state, ActionListener<Response> listener) + throws Exception; + + @Override + protected final void doExecute(Task task, Request request, ActionListener<Response> listener) { + final var state = clusterService.state(); + final var clusterBlockException = checkBlock(request, state); + if (clusterBlockException != null) { + if (clusterBlockException.retryable() == false) { + listener.onFailure(clusterBlockException); + } else { + waitForClusterUnblock(task, request, listener, state, clusterBlockException); + } + } else { + innerDoExecute(task, request, listener, state); + } + } + + private void innerDoExecute(Task task, Request request, ActionListener<Response> listener, ClusterState state) { + if (task instanceof CancellableTask cancellableTask && cancellableTask.notifyIfCancelled(listener)) { + return; + } + // Workaround for https://github.com/elastic/elasticsearch/issues/97916 - TODO remove this when we can + executor.execute(ActionRunnable.wrap(listener, l -> localClusterStateOperation(task, request, state, l))); + } + + private void waitForClusterUnblock( + Task task, + Request request, + ActionListener<Response> listener, + ClusterState initialState, + ClusterBlockException exception + ) { + var observer = new ClusterStateObserver( + initialState, + clusterService, + request.masterTimeout(), + logger, + clusterService.threadPool().getThreadContext() + ); + observer.waitForNextChange(new ClusterStateObserver.Listener() { + @Override + public void onNewClusterState(ClusterState state) { + logger.trace("retrying with cluster state version [{}]", state.version()); + innerDoExecute(task, request, listener, state); + } + + @Override + public void onClusterServiceClose() { + listener.onFailure(new NodeClosedException(clusterService.localNode())); + } + + @Override + public void onTimeout(TimeValue timeout) { + logger.debug( + () -> format("timed out while waiting for cluster to unblock in [%s] (timeout [%s])", actionName, timeout), + exception + ); + listener.onFailure(new ElasticsearchTimeoutException("timed out while waiting for cluster to unblock", exception)); + } + }, clusterState -> isTaskCancelled(task) || checkBlock(request, clusterState) == null); + } + + private boolean isTaskCancelled(Task task) { + return task instanceof CancellableTask cancellableTask && cancellableTask.isCancelled(); + } +} diff --git a/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java b/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java index 91e21eb9e80a3..657ad029626af 100644 --- a/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java +++
b/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java @@ -683,11 +683,6 @@ public boolean isRequireDataStream() { return false; } - @Override - public void process(IndexRouting indexRouting) { - // Nothing to do - } - @Override public int route(IndexRouting indexRouting) { return indexRouting.updateShard(id, routing); diff --git a/server/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java b/server/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java index 6a881163914e4..7b2f0c2c894be 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java @@ -21,6 +21,7 @@ import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.index.IndexModule; +import org.elasticsearch.jdk.RuntimeVersionFeature; import org.elasticsearch.monitor.jvm.JvmInfo; import org.elasticsearch.monitor.process.ProcessProbe; import org.elasticsearch.nativeaccess.NativeAccess; @@ -722,6 +723,9 @@ public final BootstrapCheckResult check(BootstrapContext context) { } boolean isAllPermissionGranted() { + if (RuntimeVersionFeature.isSecurityManagerAvailable() == false) { + return false; + } final SecurityManager sm = System.getSecurityManager(); assert sm != null; try { diff --git a/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java b/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java index c06ea9305aef8..ae59f6578f03a 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java @@ -35,6 +35,7 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.jdk.JarHell; +import org.elasticsearch.jdk.RuntimeVersionFeature; import org.elasticsearch.monitor.jvm.HotThreads; import org.elasticsearch.monitor.jvm.JvmInfo; import org.elasticsearch.monitor.os.OsProbe; @@ -42,9 +43,9 @@ import org.elasticsearch.nativeaccess.NativeAccess; import org.elasticsearch.node.Node; import org.elasticsearch.node.NodeValidationException; -import org.elasticsearch.plugins.PluginBundle; import org.elasticsearch.plugins.PluginsLoader; -import org.elasticsearch.plugins.PluginsUtils; +import org.elasticsearch.rest.MethodHandlers; +import org.elasticsearch.transport.RequestHandlerRegistry; import java.io.IOException; import java.io.InputStream; @@ -54,10 +55,8 @@ import java.nio.file.Path; import java.security.Permission; import java.security.Security; -import java.util.ArrayList; import java.util.List; import java.util.Objects; -import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; @@ -117,12 +116,14 @@ private static Bootstrap initPhase1() { * the presence of a security manager or lack thereof act as if there is a security manager present (e.g., DNS cache policy). * This forces such policies to take effect immediately. 
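* On runtimes where the SecurityManager is no longer available this step is skipped entirely; see the RuntimeVersionFeature.isSecurityManagerAvailable() guard below.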
*/ - org.elasticsearch.bootstrap.Security.setSecurityManager(new SecurityManager() { - @Override - public void checkPermission(Permission perm) { - // grant all permissions so that we can later set the security manager to the one that we want - } - }); + if (RuntimeVersionFeature.isSecurityManagerAvailable()) { + org.elasticsearch.bootstrap.Security.setSecurityManager(new SecurityManager() { + @Override + public void checkPermission(Permission perm) { + // grant all permissions so that we can later set the security manager to the one that we want + } + }); + } LogConfigurator.registerErrorListener(); BootstrapInfo.init(); @@ -202,28 +203,28 @@ private static void initPhase2(Bootstrap bootstrap) throws IOException { SubscribableListener.class, RunOnce.class, // We eagerly initialize to work around log4j permissions & JDK-8309727 - VectorUtil.class + VectorUtil.class, + // RequestHandlerRegistry and MethodHandlers classes do nontrivial static initialization which should always succeed but load + // it now (before SM) to be sure + RequestHandlerRegistry.class, + MethodHandlers.class ); // load the plugin Java modules and layers now for use in entitlements var pluginsLoader = PluginsLoader.createPluginsLoader(nodeEnv.modulesFile(), nodeEnv.pluginsFile()); bootstrap.setPluginsLoader(pluginsLoader); + var pluginsResolver = PluginsResolver.create(pluginsLoader); if (Boolean.parseBoolean(System.getProperty("es.entitlements.enabled"))) { LogManager.getLogger(Elasticsearch.class).info("Bootstrapping Entitlements"); - List<Tuple<Path, Boolean>> pluginData = new ArrayList<>(); - Set<PluginBundle> moduleBundles = PluginsUtils.getModuleBundles(nodeEnv.modulesFile()); - for (PluginBundle moduleBundle : moduleBundles) { - pluginData.add(Tuple.tuple(moduleBundle.getDir(), moduleBundle.pluginDescriptor().isModular())); - } - Set<PluginBundle> pluginBundles = PluginsUtils.getPluginBundles(nodeEnv.pluginsFile()); - for (PluginBundle pluginBundle : pluginBundles) { - pluginData.add(Tuple.tuple(pluginBundle.getDir(), pluginBundle.pluginDescriptor().isModular())); - } - // TODO: add a functor to map module to plugin name - EntitlementBootstrap.bootstrap(pluginData, callerClass -> null); - } else { + List<Tuple<Path, Boolean>> pluginData = pluginsLoader.allBundles() + .stream() + .map(bundle -> Tuple.tuple(bundle.getDir(), bundle.pluginDescriptor().isModular())) + .toList(); + + EntitlementBootstrap.bootstrap(pluginData, pluginsResolver::resolveClassToPluginName); + } else if (RuntimeVersionFeature.isSecurityManagerAvailable()) { // install SM after natives, shutdown hooks, etc. LogManager.getLogger(Elasticsearch.class).info("Bootstrapping java SecurityManager"); org.elasticsearch.bootstrap.Security.configure( @@ -231,6 +232,8 @@ private static void initPhase2(Bootstrap bootstrap) throws IOException { SECURITY_FILTER_BAD_DEFAULTS_SETTING.get(args.nodeSettings()), args.pidFile() ); + } else { + LogManager.getLogger(Elasticsearch.class).warn("Bootstrapping without any protection"); + } } } diff --git a/server/src/main/java/org/elasticsearch/bootstrap/PluginsResolver.java b/server/src/main/java/org/elasticsearch/bootstrap/PluginsResolver.java new file mode 100644 index 0000000000000..256e91cbee16d --- /dev/null +++ b/server/src/main/java/org/elasticsearch/bootstrap/PluginsResolver.java @@ -0,0 +1,47 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements.
Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.bootstrap; + +import org.elasticsearch.plugins.PluginsLoader; + +import java.util.HashMap; +import java.util.Map; + +class PluginsResolver { + private final Map pluginNameByModule; + + private PluginsResolver(Map pluginNameByModule) { + this.pluginNameByModule = pluginNameByModule; + } + + public static PluginsResolver create(PluginsLoader pluginsLoader) { + Map pluginNameByModule = new HashMap<>(); + + pluginsLoader.pluginLayers().forEach(pluginLayer -> { + var pluginName = pluginLayer.pluginBundle().pluginDescriptor().getName(); + if (pluginLayer.pluginModuleLayer() != null && pluginLayer.pluginModuleLayer() != ModuleLayer.boot()) { + // This plugin is a Java Module + for (var module : pluginLayer.pluginModuleLayer().modules()) { + pluginNameByModule.put(module, pluginName); + } + } else { + // This plugin is not modularized + pluginNameByModule.put(pluginLayer.pluginClassLoader().getUnnamedModule(), pluginName); + } + }); + + return new PluginsResolver(pluginNameByModule); + } + + public String resolveClassToPluginName(Class clazz) { + var module = clazz.getModule(); + return pluginNameByModule.get(module); + } +} diff --git a/server/src/main/java/org/elasticsearch/client/internal/Client.java b/server/src/main/java/org/elasticsearch/client/internal/Client.java index 4158bbfb27cda..2d1cbe0cce7f7 100644 --- a/server/src/main/java/org/elasticsearch/client/internal/Client.java +++ b/server/src/main/java/org/elasticsearch/client/internal/Client.java @@ -52,8 +52,6 @@ import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.action.update.UpdateRequestBuilder; import org.elasticsearch.action.update.UpdateResponse; -import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Nullable; import org.elasticsearch.transport.RemoteClusterService; @@ -74,14 +72,6 @@ */ public interface Client extends ElasticsearchClient { - // Note: This setting is registered only for bwc. The value is never read. - Setting CLIENT_TYPE_SETTING_S = new Setting<>("client.type", "node", (s) -> { - return switch (s) { - case "node", "transport" -> s; - default -> throw new IllegalArgumentException("Can't parse [client.type] must be one of [node, transport]"); - }; - }, Property.NodeScope, Property.Deprecated); - /** * The admin client that can be used to perform administrative operations. 
*/ diff --git a/server/src/main/java/org/elasticsearch/cluster/RepositoryCleanupInProgress.java b/server/src/main/java/org/elasticsearch/cluster/RepositoryCleanupInProgress.java index 06ec8abf60ff4..172fa34e14ecb 100644 --- a/server/src/main/java/org/elasticsearch/cluster/RepositoryCleanupInProgress.java +++ b/server/src/main/java/org/elasticsearch/cluster/RepositoryCleanupInProgress.java @@ -93,7 +93,7 @@ public String toString() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersions.V_7_4_0; + return TransportVersions.ZERO; } public record Entry(String repository, long repositoryStateId) implements Writeable, RepositoryOperation { diff --git a/server/src/main/java/org/elasticsearch/cluster/health/ClusterIndexHealth.java b/server/src/main/java/org/elasticsearch/cluster/health/ClusterIndexHealth.java index b6c1defe91a75..9cf567c219660 100644 --- a/server/src/main/java/org/elasticsearch/cluster/health/ClusterIndexHealth.java +++ b/server/src/main/java/org/elasticsearch/cluster/health/ClusterIndexHealth.java @@ -111,7 +111,7 @@ public ClusterIndexHealth(final StreamInput in) throws IOException { unassignedShards = in.readVInt(); status = ClusterHealthStatus.readFrom(in); shards = in.readMapValues(ClusterShardHealth::new, ClusterShardHealth::getShardId); - if (in.getTransportVersion().onOrAfter(TransportVersions.UNASSIGNED_PRIMARY_COUNT_ON_CLUSTER_HEALTH)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { unassignedPrimaryShards = in.readVInt(); } else { unassignedPrimaryShards = 0; @@ -203,7 +203,7 @@ public void writeTo(final StreamOutput out) throws IOException { out.writeVInt(unassignedShards); out.writeByte(status.value()); out.writeMapValues(shards); - if (out.getTransportVersion().onOrAfter(TransportVersions.UNASSIGNED_PRIMARY_COUNT_ON_CLUSTER_HEALTH)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { out.writeVInt(unassignedPrimaryShards); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/health/ClusterShardHealth.java b/server/src/main/java/org/elasticsearch/cluster/health/ClusterShardHealth.java index 63863542564cd..f512acb6e04d0 100644 --- a/server/src/main/java/org/elasticsearch/cluster/health/ClusterShardHealth.java +++ b/server/src/main/java/org/elasticsearch/cluster/health/ClusterShardHealth.java @@ -96,7 +96,7 @@ public ClusterShardHealth(final StreamInput in) throws IOException { initializingShards = in.readVInt(); unassignedShards = in.readVInt(); primaryActive = in.readBoolean(); - if (in.getTransportVersion().onOrAfter(TransportVersions.UNASSIGNED_PRIMARY_COUNT_ON_CLUSTER_HEALTH)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { unassignedPrimaryShards = in.readVInt(); } else { unassignedPrimaryShards = 0; @@ -167,7 +167,7 @@ public void writeTo(final StreamOutput out) throws IOException { out.writeVInt(initializingShards); out.writeVInt(unassignedShards); out.writeBoolean(primaryActive); - if (out.getTransportVersion().onOrAfter(TransportVersions.UNASSIGNED_PRIMARY_COUNT_ON_CLUSTER_HEALTH)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { out.writeVInt(unassignedPrimaryShards); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/health/ClusterStateHealth.java b/server/src/main/java/org/elasticsearch/cluster/health/ClusterStateHealth.java index 579429b5d51dd..31f275e29c368 100644 --- a/server/src/main/java/org/elasticsearch/cluster/health/ClusterStateHealth.java +++ 
b/server/src/main/java/org/elasticsearch/cluster/health/ClusterStateHealth.java @@ -120,7 +120,7 @@ public ClusterStateHealth(final StreamInput in) throws IOException { status = ClusterHealthStatus.readFrom(in); indices = in.readMapValues(ClusterIndexHealth::new, ClusterIndexHealth::getIndex); activeShardsPercent = in.readDouble(); - if (in.getTransportVersion().onOrAfter(TransportVersions.UNASSIGNED_PRIMARY_COUNT_ON_CLUSTER_HEALTH)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { unassignedPrimaryShards = in.readVInt(); } else { unassignedPrimaryShards = 0; @@ -212,7 +212,7 @@ public void writeTo(final StreamOutput out) throws IOException { out.writeByte(status.value()); out.writeMapValues(indices); out.writeDouble(activeShardsPercent); - if (out.getTransportVersion().onOrAfter(TransportVersions.UNASSIGNED_PRIMARY_COUNT_ON_CLUSTER_HEALTH)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { out.writeVInt(unassignedPrimaryShards); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java b/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java index ae7cff6312155..998217e93c426 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java @@ -19,6 +19,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.index.mapper.DataStreamTimestampFieldMapper; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.xcontent.ConstructingObjectParser; @@ -372,16 +373,14 @@ public static class DataStreamTemplate implements Writeable, ToXContentObject { private static final ParseField HIDDEN = new ParseField("hidden"); private static final ParseField ALLOW_CUSTOM_ROUTING = new ParseField("allow_custom_routing"); + // Remove this after this PR gets backported + @UpdateForV9(owner = UpdateForV9.Owner.DATA_MANAGEMENT) private static final ParseField FAILURE_STORE = new ParseField("failure_store"); public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( "data_stream_template", false, - args -> new DataStreamTemplate( - args[0] != null && (boolean) args[0], - args[1] != null && (boolean) args[1], - DataStream.isFailureStoreFeatureFlagEnabled() && args[2] != null && (boolean) args[2] - ) + args -> new DataStreamTemplate(args[0] != null && (boolean) args[0], args[1] != null && (boolean) args[1]) ); static { @@ -394,20 +393,14 @@ public static class DataStreamTemplate implements Writeable, ToXContentObject { private final boolean hidden; private final boolean allowCustomRouting; - private final boolean failureStore; public DataStreamTemplate() { - this(false, false, false); + this(false, false); } public DataStreamTemplate(boolean hidden, boolean allowCustomRouting) { - this(hidden, allowCustomRouting, false); - } - - public DataStreamTemplate(boolean hidden, boolean allowCustomRouting, boolean failureStore) { this.hidden = hidden; this.allowCustomRouting = allowCustomRouting; - this.failureStore = failureStore; } DataStreamTemplate(StreamInput in) throws IOException { @@ -425,10 +418,9 @@ public DataStreamTemplate(boolean hidden, boolean allowCustomRouting, boolean fa boolean value = in.readBoolean(); assert value == false : "expected false, because 
this used to be an optional enum that never got set"; } - if (in.getTransportVersion().onOrAfter(DataStream.ADDED_FAILURE_STORE_TRANSPORT_VERSION)) { - failureStore = in.readBoolean(); - } else { - failureStore = false; + if (in.getTransportVersion() + .between(DataStream.ADDED_FAILURE_STORE_TRANSPORT_VERSION, TransportVersions.ADD_DATA_STREAM_OPTIONS_TO_TEMPLATES)) { + in.readBoolean(); } } @@ -458,10 +450,6 @@ public boolean isAllowCustomRouting() { return allowCustomRouting; } - public boolean hasFailureStore() { - return failureStore; - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(hidden); @@ -472,8 +460,11 @@ public void writeTo(StreamOutput out) throws IOException { // See comment in constructor. out.writeBoolean(false); } - if (out.getTransportVersion().onOrAfter(DataStream.ADDED_FAILURE_STORE_TRANSPORT_VERSION)) { - out.writeBoolean(failureStore); + if (out.getTransportVersion() + .between(DataStream.ADDED_FAILURE_STORE_TRANSPORT_VERSION, TransportVersions.ADD_DATA_STREAM_OPTIONS_TO_TEMPLATES)) { + // Previous versions expect the failure store to be configured via the DataStreamTemplate. We add it here, so we don't break + // the serialisation, but we do not care to preserve the value because this feature is still behind a feature flag. + out.writeBoolean(false); } } @@ -482,9 +473,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.startObject(); builder.field("hidden", hidden); builder.field(ALLOW_CUSTOM_ROUTING.getPreferredName(), allowCustomRouting); - if (DataStream.isFailureStoreFeatureFlagEnabled()) { - builder.field(FAILURE_STORE.getPreferredName(), failureStore); - } builder.endObject(); return builder; } @@ -494,12 +482,12 @@ public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; DataStreamTemplate that = (DataStreamTemplate) o; - return hidden == that.hidden && allowCustomRouting == that.allowCustomRouting && failureStore == that.failureStore; + return hidden == that.hidden && allowCustomRouting == that.allowCustomRouting; } @Override public int hashCode() { - return Objects.hash(hidden, allowCustomRouting, failureStore); + return Objects.hash(hidden, allowCustomRouting); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java index 4dcc7c73c280e..1c6206a4815eb 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java @@ -71,6 +71,7 @@ public final class DataStream implements SimpleDiffable, ToXContentO public static final FeatureFlag FAILURE_STORE_FEATURE_FLAG = new FeatureFlag("failure_store"); public static final TransportVersion ADDED_FAILURE_STORE_TRANSPORT_VERSION = TransportVersions.V_8_12_0; public static final TransportVersion ADDED_AUTO_SHARDING_EVENT_VERSION = TransportVersions.V_8_14_0; + public static final TransportVersion ADD_DATA_STREAM_OPTIONS_VERSION = TransportVersions.V_8_16_0; public static boolean isFailureStoreFeatureFlagEnabled() { return FAILURE_STORE_FEATURE_FLAG.isEnabled(); @@ -200,9 +201,7 @@ public static DataStream read(StreamInput in) throws IOException { : null; // This boolean flag has been moved in data stream options var failureStoreEnabled = in.getTransportVersion() - .between(DataStream.ADDED_FAILURE_STORE_TRANSPORT_VERSION, TransportVersions.ADD_DATA_STREAM_OPTIONS) - ? 
in.readBoolean() - : false; + .between(DataStream.ADDED_FAILURE_STORE_TRANSPORT_VERSION, TransportVersions.V_8_16_0) ? in.readBoolean() : false; var failureIndices = in.getTransportVersion().onOrAfter(DataStream.ADDED_FAILURE_STORE_TRANSPORT_VERSION) ? readIndices(in) : List.of(); @@ -216,7 +215,7 @@ public static DataStream read(StreamInput in) throws IOException { .setAutoShardingEvent(in.readOptionalWriteable(DataStreamAutoShardingEvent::new)); } DataStreamOptions dataStreamOptions; - if (in.getTransportVersion().onOrAfter(TransportVersions.ADD_DATA_STREAM_OPTIONS)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { dataStreamOptions = in.readOptionalWriteable(DataStreamOptions::read); } else { // We cannot distinguish if failure store was explicitly disabled or not. Given that failure store @@ -425,7 +424,7 @@ public boolean isAllowCustomRouting() { * @return true, if the user has explicitly enabled the failure store. */ public boolean isFailureStoreEnabled() { - return dataStreamOptions.failureStore() != null && dataStreamOptions.failureStore().isExplicitlyEnabled(); + return dataStreamOptions.isFailureStoreEnabled(); } @Nullable @@ -1077,7 +1076,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalWriteable(lifecycle); } if (out.getTransportVersion() - .between(DataStream.ADDED_FAILURE_STORE_TRANSPORT_VERSION, TransportVersions.ADD_DATA_STREAM_OPTIONS)) { + .between(DataStream.ADDED_FAILURE_STORE_TRANSPORT_VERSION, DataStream.ADD_DATA_STREAM_OPTIONS_VERSION)) { out.writeBoolean(isFailureStoreEnabled()); } if (out.getTransportVersion().onOrAfter(DataStream.ADDED_FAILURE_STORE_TRANSPORT_VERSION)) { @@ -1093,7 +1092,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(failureIndices.rolloverOnWrite); out.writeOptionalWriteable(failureIndices.autoShardingEvent); } - if (out.getTransportVersion().onOrAfter(TransportVersions.ADD_DATA_STREAM_OPTIONS)) { + if (out.getTransportVersion().onOrAfter(DataStream.ADD_DATA_STREAM_OPTIONS_VERSION)) { out.writeOptionalWriteable(dataStreamOptions.isEmpty() ? null : dataStreamOptions); } } @@ -1189,6 +1188,7 @@ public void writeTo(StreamOutput out) throws IOException { ); // The fields behind the feature flag should always be last. 
if (DataStream.isFailureStoreFeatureFlagEnabled()) { + // Should be removed after backport PARSER.declareBoolean(ConstructingObjectParser.optionalConstructorArg(), FAILURE_STORE_FIELD); PARSER.declareObjectArray( ConstructingObjectParser.optionalConstructorArg(), diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamFailureStore.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamFailureStore.java index e9d32594fa833..5a6217eea8f7b 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamFailureStore.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamFailureStore.java @@ -14,7 +14,9 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ObjectParser; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; @@ -27,11 +29,13 @@ * supports the following configurations only explicitly enabling or disabling the failure store */ public record DataStreamFailureStore(Boolean enabled) implements SimpleDiffable, ToXContentObject { + public static final String FAILURE_STORE = "failure_store"; + public static final String ENABLED = "enabled"; - public static final ParseField ENABLED_FIELD = new ParseField("enabled"); + public static final ParseField ENABLED_FIELD = new ParseField(ENABLED); public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - "failure_store", + FAILURE_STORE, false, (args, unused) -> new DataStreamFailureStore((Boolean) args[0]) ); @@ -59,13 +63,6 @@ public static Diff readDiffFrom(StreamInput in) throws I return SimpleDiffable.readDiffFrom(DataStreamFailureStore::new, in); } - /** - * @return iff the user has explicitly enabled the failure store - */ - public boolean isExplicitlyEnabled() { - return enabled != null && enabled; - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeOptionalBoolean(enabled); @@ -89,4 +86,80 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws public static DataStreamFailureStore fromXContent(XContentParser parser) throws IOException { return PARSER.parse(parser, null); } + + /** + * This class is only used in template configuration. It wraps the fields of {@link DataStreamFailureStore} with {@link ResettableValue} + * to allow a user to signal when they want to reset any previously encountered values during template composition. Furthermore, it + * provides the method {@link #merge(Template, Template)} that dictates how two templates can be composed. + */ + public record Template(ResettableValue enabled) implements Writeable, ToXContentObject { + + @SuppressWarnings("unchecked") + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "failure_store_template", + false, + (args, unused) -> new Template(args[0] == null ? ResettableValue.undefined() : (ResettableValue) args[0]) + ); + + static { + PARSER.declareField( + ConstructingObjectParser.optionalConstructorArg(), + (p, c) -> p.currentToken() == XContentParser.Token.VALUE_NULL + ? 
ResettableValue.reset() + : ResettableValue.create(p.booleanValue()), + ENABLED_FIELD, + ObjectParser.ValueType.BOOLEAN_OR_NULL + ); + } + + public Template { + if (enabled.get() == null) { + throw new IllegalArgumentException("Failure store configuration should have at least one non-null configuration value."); + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + ResettableValue.write(out, enabled, StreamOutput::writeBoolean); + } + + public static Template read(StreamInput in) throws IOException { + ResettableValue enabled = ResettableValue.read(in, StreamInput::readBoolean); + return new Template(enabled); + } + + /** + * Converts the template to XContent, depending on the XContent.Params set by {@link ResettableValue#hideResetValues(Params)} + * it may or may not display any explicit nulls when the value is to be reset. + */ + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + enabled.toXContent(builder, params, ENABLED_FIELD.getPreferredName()); + builder.endObject(); + return builder; + } + + public static Template fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + /** + * Returns a template which has the value of the initial template updated with the values of the update. + * Note: for now it's a trivial composition because we have only one non-null field. + * @return the composed template + */ + public static Template merge(Template ignored, Template update) { + return update; + } + + public DataStreamFailureStore toFailureStore() { + return new DataStreamFailureStore(enabled.get()); + } + + @Override + public String toString() { + return Strings.toString(this, true, true); + } + } } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamOptions.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamOptions.java index 9cd4e2625e2ba..51e13c05e6892 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamOptions.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamOptions.java @@ -14,9 +14,9 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.core.Nullable; import org.elasticsearch.xcontent.ConstructingObjectParser; -import org.elasticsearch.xcontent.ObjectParser; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; @@ -24,6 +24,8 @@ import java.io.IOException; +import static org.elasticsearch.cluster.metadata.DataStreamFailureStore.FAILURE_STORE; + /** * Holds data stream dedicated configuration options such as failure store, (in the future lifecycle). 
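As a quick reference for the DataStreamFailureStore.Template parser defined above (inputs are illustrative; the behavior is read directly from the parser declaration and the compact constructor):

// {"enabled": true}  -> new Template(ResettableValue.create(true))
// {"enabled": false} -> new Template(ResettableValue.create(false))
// {"enabled": null}  -> parsed as ResettableValue.reset(), then rejected by the compact
//                       constructor, as is {}: with a single field, a template carrying
//                       no concrete value fails with "Failure store configuration should
//                       have at least one non-null configuration value."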
Currently, it * supports the following configurations: @@ -34,10 +36,10 @@ public record DataStreamOptions(@Nullable DataStreamFailureStore failureStore) SimpleDiffable, ToXContentObject { - public static final ParseField FAILURE_STORE_FIELD = new ParseField("failure_store"); + public static final ParseField FAILURE_STORE_FIELD = new ParseField(FAILURE_STORE); public static final DataStreamOptions FAILURE_STORE_ENABLED = new DataStreamOptions(new DataStreamFailureStore(true)); public static final DataStreamOptions FAILURE_STORE_DISABLED = new DataStreamOptions(new DataStreamFailureStore(false)); - public static final DataStreamOptions EMPTY = new DataStreamOptions(); + public static final DataStreamOptions EMPTY = new DataStreamOptions(null); public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( "options", @@ -46,18 +48,13 @@ public record DataStreamOptions(@Nullable DataStreamFailureStore failureStore) ); static { - PARSER.declareField( + PARSER.declareObject( ConstructingObjectParser.optionalConstructorArg(), (p, c) -> DataStreamFailureStore.fromXContent(p), - FAILURE_STORE_FIELD, - ObjectParser.ValueType.OBJECT_OR_NULL + FAILURE_STORE_FIELD ); } - public DataStreamOptions() { - this(null); - } - public static DataStreamOptions read(StreamInput in) throws IOException { return new DataStreamOptions(in.readOptionalWriteable(DataStreamFailureStore::new)); } @@ -66,8 +63,21 @@ public static Diff readDiffFrom(StreamInput in) throws IOExce return SimpleDiffable.readDiffFrom(DataStreamOptions::read, in); } + /** + * @return true if none of the options are defined + */ public boolean isEmpty() { - return this.equals(EMPTY); + return failureStore == null; + } + + /** + * Determines if this data stream has its failure store enabled or not. Currently, the failure store + * is enabled only when a user has explicitly requested it. + * + * @return true, if the user has explicitly enabled the failure store. + */ + public boolean isFailureStoreEnabled() { + return failureStore != null && Boolean.TRUE.equals(failureStore.enabled()); } @Override @@ -93,4 +103,100 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws public static DataStreamOptions fromXContent(XContentParser parser) throws IOException { return PARSER.parse(parser, null); } + + /** + * This class is only used in template configuration. It wraps the fields of {@link DataStreamOptions} with {@link ResettableValue} + * to allow a user to signal when they want to reset any previously encountered values during template composition. Furthermore, it + * provides the {@link Template.Builder} that dictates how two templates can be composed. + */ + public record Template(ResettableValue failureStore) implements Writeable, ToXContentObject { + public static final Template EMPTY = new Template(ResettableValue.undefined()); + + @SuppressWarnings("unchecked") + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "data_stream_options_template", + false, + (args, unused) -> new Template( + args[0] == null ? 
ResettableValue.undefined() : (ResettableValue<DataStreamFailureStore.Template>) args[0] + ) + ); + + static { + PARSER.declareObjectOrNull( + ConstructingObjectParser.optionalConstructorArg(), + (p, s) -> ResettableValue.create(DataStreamFailureStore.Template.fromXContent(p)), + ResettableValue.reset(), + FAILURE_STORE_FIELD + ); + } + + public Template { + assert failureStore != null : "Template does not accept null values, please use ResettableValue.undefined()"; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + ResettableValue.write(out, failureStore, (o, v) -> v.writeTo(o)); + } + + public static Template read(StreamInput in) throws IOException { + ResettableValue<DataStreamFailureStore.Template> failureStore = ResettableValue.read(in, DataStreamFailureStore.Template::read); + return new Template(failureStore); + } + + public static Template fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + /** + * Converts the template to XContent; depending on the {@code params} set by {@link ResettableValue#hideResetValues(Params)}, + * it may or may not display any explicit nulls when the value is to be reset. + */ + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + failureStore.toXContent(builder, params, FAILURE_STORE_FIELD.getPreferredName()); + builder.endObject(); + return builder; + } + + public DataStreamOptions toDataStreamOptions() { + return new DataStreamOptions(failureStore.mapAndGet(DataStreamFailureStore.Template::toFailureStore)); + } + + public static Builder builder(Template template) { + return new Builder(template); + } + + /** + * Builds and composes a data stream options template. + */ + public static class Builder { + private ResettableValue<DataStreamFailureStore.Template> failureStore = ResettableValue.undefined(); + + public Builder(Template template) { + if (template != null) { + failureStore = template.failureStore(); + } + } + + /** + * Updates the current failure store configuration with the provided value. This is not necessarily a replacement; if both + * instances contain data, the configurations are merged.
+ */ + public Builder updateFailureStore(ResettableValue newFailureStore) { + failureStore = ResettableValue.merge(failureStore, newFailureStore, DataStreamFailureStore.Template::merge); + return this; + } + + public Template build() { + return new Template(failureStore); + } + } + + @Override + public String toString() { + return Strings.toString(this, true, true); + } + } } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java index 681ea84513088..952789e1bf746 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java @@ -1618,11 +1618,7 @@ private static class IndexMetadataDiff implements Diff { version = in.readLong(); mappingVersion = in.readVLong(); settingsVersion = in.readVLong(); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_2_0)) { - aliasesVersion = in.readVLong(); - } else { - aliasesVersion = 1; - } + aliasesVersion = in.readVLong(); state = State.fromId(in.readByte()); if (in.getTransportVersion().onOrAfter(SETTING_DIFF_VERSION)) { settings = null; @@ -1688,9 +1684,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeLong(version); out.writeVLong(mappingVersion); out.writeVLong(settingsVersion); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_2_0)) { - out.writeVLong(aliasesVersion); - } + out.writeVLong(aliasesVersion); out.writeByte(state.id); assert settings != null : "settings should always be non-null since this instance is not expected to have been read from another node"; @@ -1776,9 +1770,7 @@ public static IndexMetadata readFrom(StreamInput in, @Nullable Function dataStreamNames(ClusterState state, IndicesOptions options, getSystemIndexAccessPredicate(), getNetNewSystemIndexPredicate() ); - final Collection expressions = resolveExpressionsToResources(context, indexExpressions); + final Collection expressions = resolveExpressionsToResources(context, indexExpressions); return expressions.stream() + .map(ResolvedExpression::resource) .map(x -> state.metadata().getIndicesLookup().get(x)) .filter(Objects::nonNull) .filter(ia -> ia.getType() == Type.DATA_STREAM) @@ -227,10 +235,11 @@ public IndexAbstraction resolveWriteIndexAbstraction(ClusterState state, DocWrit getNetNewSystemIndexPredicate() ); - final Collection expressions = resolveExpressionsToResources(context, request.index()); + final Collection expressions = resolveExpressionsToResources(context, request.index()); if (expressions.size() == 1) { - IndexAbstraction ia = state.metadata().getIndicesLookup().get(expressions.iterator().next()); + ResolvedExpression resolvedExpression = expressions.iterator().next(); + IndexAbstraction ia = state.metadata().getIndicesLookup().get(resolvedExpression.resource()); if (ia.getType() == Type.ALIAS) { Index writeIndex = ia.getWriteIndex(); if (writeIndex == null) { @@ -257,7 +266,7 @@ public IndexAbstraction resolveWriteIndexAbstraction(ClusterState state, DocWrit * If {@param preserveDataStreams} is {@code true}, data streams that are covered by the wildcards from the * {@param expressions} are returned as-is, without expanding them further to their respective backing indices. */ - protected static Collection resolveExpressionsToResources(Context context, String... expressions) { + protected static Collection resolveExpressionsToResources(Context context, String... 
expressions) { // If we do not expand wildcards, then empty or _all expression result in an empty list boolean expandWildcards = context.getOptions().expandWildcardExpressions(); if (expandWildcards == false) { @@ -275,7 +284,7 @@ protected static Collection resolveExpressionsToResources(Context contex } // Using ArrayList when we know we do not have wildcards is an optimisation, given that one expression result in 0 or 1 resources. - Collection resources = expandWildcards && WildcardExpressionResolver.hasWildcards(expressions) + Collection resources = expandWildcards && WildcardExpressionResolver.hasWildcards(expressions) ? new LinkedHashSet<>() : new ArrayList<>(expressions.length); boolean wildcardSeen = false; @@ -297,7 +306,7 @@ protected static Collection resolveExpressionsToResources(Context contex wildcardSeen |= isWildcard; if (isWildcard) { - Set matchingResources = WildcardExpressionResolver.matchWildcardToResources(context, baseExpression); + Set matchingResources = WildcardExpressionResolver.matchWildcardToResources(context, baseExpression); if (context.getOptions().allowNoIndices() == false && matchingResources.isEmpty()) { throw notFoundException(baseExpression); @@ -310,9 +319,9 @@ protected static Collection resolveExpressionsToResources(Context contex } } else { if (isExclusion) { - resources.remove(baseExpression); + resources.remove(new ResolvedExpression(baseExpression)); } else if (ensureAliasOrIndexExists(context, baseExpression)) { - resources.add(baseExpression); + resources.add(new ResolvedExpression(baseExpression)); } } } @@ -428,12 +437,12 @@ String[] concreteIndexNames(Context context, String... indexExpressions) { } Index[] concreteIndices(Context context, String... indexExpressions) { - final Collection expressions = resolveExpressionsToResources(context, indexExpressions); + final Collection expressions = resolveExpressionsToResources(context, indexExpressions); final Set concreteIndicesResult = Sets.newLinkedHashSetWithExpectedSize(expressions.size()); final Map indicesLookup = context.getState().metadata().getIndicesLookup(); - for (String expression : expressions) { - final IndexAbstraction indexAbstraction = indicesLookup.get(expression); + for (ResolvedExpression expression : expressions) { + final IndexAbstraction indexAbstraction = indicesLookup.get(expression.resource()); assert indexAbstraction != null; if (indexAbstraction.getType() == Type.ALIAS && context.isResolveToWriteIndex()) { Index writeIndex = indexAbstraction.getWriteIndex(); @@ -467,7 +476,7 @@ Index[] concreteIndices(Context context, String... indexExpressions) { throw new IllegalArgumentException( indexAbstraction.getType().getDisplayName() + " [" - + expression + + expression.resource() + "] has more than one index associated with it " + Arrays.toString(indexNames) + ", can't execute a single index op" @@ -682,7 +691,7 @@ public Index concreteSingleIndex(ClusterState state, IndicesRequest request) { * Utility method that allows to resolve an index expression to its corresponding single write index. * * @param state the cluster state containing all the data to resolve to expression to a concrete index - * @param request The request that defines how the an alias or an index need to be resolved to a concrete index + * @param request The request that defines how an alias or an index need to be resolved to a concrete index * and the expression that can be resolved to an alias or an index name. 
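The resolver methods in this file now traffic in ResolvedExpression values rather than raw strings. The record itself is introduced elsewhere in this PR; a minimal sketch consistent with every usage shown here (a single-argument constructor plus a resource() accessor):

// Minimal sketch: a value wrapper around the resolved resource name, so equality,
// hashing, and set membership behave exactly as they did for plain strings.
public record ResolvedExpression(String resource) {}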
* @throws IllegalArgumentException if the index resolution does not lead to an index, or leads to more than one index * @return the write index obtained as a result of the index resolution @@ -759,7 +768,7 @@ public boolean hasIndexAbstraction(String indexAbstraction, ClusterState state) /** * Resolve an array of expressions to the set of indices and aliases that these expressions match. */ - public Set resolveExpressions(ClusterState state, String... expressions) { + public Set resolveExpressions(ClusterState state, String... expressions) { return resolveExpressions(state, IndicesOptions.lenientExpandOpen(), false, expressions); } @@ -768,7 +777,7 @@ public Set resolveExpressions(ClusterState state, String... expressions) * If {@param preserveDataStreams} is {@code true}, datastreams that are covered by the wildcards from the * {@param expressions} are returned as-is, without expanding them further to their respective backing indices. */ - public Set resolveExpressions( + public Set resolveExpressions( ClusterState state, IndicesOptions indicesOptions, boolean preserveDataStreams, @@ -786,10 +795,10 @@ public Set resolveExpressions( getNetNewSystemIndexPredicate() ); // unmodifiable without creating a new collection as it might contain many items - Collection resolved = resolveExpressionsToResources(context, expressions); - if (resolved instanceof Set) { + Collection resolved = resolveExpressionsToResources(context, expressions); + if (resolved instanceof Set) { // unmodifiable without creating a new collection as it might contain many items - return Collections.unmodifiableSet((Set) resolved); + return Collections.unmodifiableSet((Set) resolved); } else { return Set.copyOf(resolved); } @@ -802,7 +811,7 @@ public Set resolveExpressions( * the index itself - null is returned. Returns {@code null} if no filtering is required. * NOTE: The provided expressions must have been resolved already via {@link #resolveExpressionsToResources(Context, String...)}. 
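Why the wrapper costs nothing in the lookups below: records get value-based equals/hashCode, so a freshly constructed instance matches one produced during resolution.

// Assumes the ResolvedExpression record sketched above.
Set<ResolvedExpression> resolved = Set.of(new ResolvedExpression("logs-app"));
assert resolved.contains(new ResolvedExpression("logs-app")); // record value equality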
*/ - public String[] filteringAliases(ClusterState state, String index, Set resolvedExpressions) { + public String[] filteringAliases(ClusterState state, String index, Set resolvedExpressions) { return indexAliases(state, index, AliasMetadata::filteringRequired, DataStreamAlias::filteringRequired, false, resolvedExpressions); } @@ -829,26 +838,25 @@ public String[] indexAliases( Predicate requiredAlias, Predicate requiredDataStreamAlias, boolean skipIdentity, - Set resolvedExpressions + Set resolvedExpressions ) { - if (isAllIndices(resolvedExpressions)) { + if (isAllIndicesExpression(resolvedExpressions)) { return null; } - final IndexMetadata indexMetadata = state.metadata().getIndices().get(index); if (indexMetadata == null) { // Shouldn't happen throw new IndexNotFoundException(index); } - if (skipIdentity == false && resolvedExpressions.contains(index)) { + if (skipIdentity == false && resolvedExpressions.contains(new ResolvedExpression(index))) { return null; } IndexAbstraction ia = state.metadata().getIndicesLookup().get(index); DataStream dataStream = ia.getParentDataStream(); if (dataStream != null) { - if (skipIdentity == false && resolvedExpressions.contains(dataStream.getName())) { + if (skipIdentity == false && resolvedExpressions.contains(new ResolvedExpression(dataStream.getName()))) { // skip the filters when the request targets the data stream name return null; } @@ -857,11 +865,12 @@ public String[] indexAliases( if (iterateIndexAliases(dataStreamAliases.size(), resolvedExpressions.size())) { aliasesForDataStream = dataStreamAliases.values() .stream() - .filter(dataStreamAlias -> resolvedExpressions.contains(dataStreamAlias.getName())) + .filter(dataStreamAlias -> resolvedExpressions.contains(new ResolvedExpression(dataStreamAlias.getName()))) .filter(dataStreamAlias -> dataStreamAlias.getDataStreams().contains(dataStream.getName())) .toList(); } else { aliasesForDataStream = resolvedExpressions.stream() + .map(ResolvedExpression::resource) .map(dataStreamAliases::get) .filter(dataStreamAlias -> dataStreamAlias != null && dataStreamAlias.getDataStreams().contains(dataStream.getName())) .toList(); @@ -890,11 +899,12 @@ public String[] indexAliases( // faster to iterate indexAliases aliasCandidates = indexAliases.values() .stream() - .filter(aliasMetadata -> resolvedExpressions.contains(aliasMetadata.alias())) + .filter(aliasMetadata -> resolvedExpressions.contains(new ResolvedExpression(aliasMetadata.alias()))) .toArray(AliasMetadata[]::new); } else { // faster to iterate resolvedExpressions aliasCandidates = resolvedExpressions.stream() + .map(ResolvedExpression::resource) .map(indexAliases::get) .filter(Objects::nonNull) .toArray(AliasMetadata[]::new); @@ -937,12 +947,7 @@ public Map> resolveSearchRouting(ClusterState state, @Nullab getSystemIndexAccessPredicate(), getNetNewSystemIndexPredicate() ); - final Collection resolvedExpressions = resolveExpressionsToResources(context, expressions); - - // TODO: it appears that this can never be true? 
- if (isAllIndices(resolvedExpressions)) { - return resolveSearchRoutingAllIndices(state.metadata(), routing); - } + final Collection resolvedExpressions = resolveExpressionsToResources(context, expressions); Map> routings = null; Set paramRouting = null; @@ -952,8 +957,8 @@ public Map> resolveSearchRouting(ClusterState state, @Nullab paramRouting = Sets.newHashSet(Strings.splitStringByCommaToArray(routing)); } - for (String expression : resolvedExpressions) { - IndexAbstraction indexAbstraction = state.metadata().getIndicesLookup().get(expression); + for (ResolvedExpression resolvedExpression : resolvedExpressions) { + IndexAbstraction indexAbstraction = state.metadata().getIndicesLookup().get(resolvedExpression.resource()); if (indexAbstraction != null && indexAbstraction.getType() == Type.ALIAS) { for (int i = 0, n = indexAbstraction.getIndices().size(); i < n; i++) { Index index = indexAbstraction.getIndices().get(i); @@ -993,7 +998,7 @@ public Map> resolveSearchRouting(ClusterState state, @Nullab } } else { // Index - routings = collectRoutings(routings, paramRouting, norouting, expression); + routings = collectRoutings(routings, paramRouting, norouting, resolvedExpression.resource()); } } @@ -1039,6 +1044,30 @@ public static Map> resolveSearchRoutingAllIndices(Metadata m return null; } + /** + * Identifies whether the array containing index names given as argument refers to all indices + * The empty or null array identifies all indices + * + * @param aliasesOrIndices the array containing index names + * @return true if the provided array maps to all indices, false otherwise + */ + public static boolean isAllIndicesExpression(Collection aliasesOrIndices) { + return aliasesOrIndices == null || aliasesOrIndices.isEmpty() || isExplicitAllPatternExpression(aliasesOrIndices); + } + + /** + * Identifies whether the array containing index names given as argument explicitly refers to all indices + * The empty or null array doesn't explicitly map to all indices + * + * @param aliasesOrIndices the array containing index names + * @return true if the provided array explicitly maps to all indices, false otherwise + */ + static boolean isExplicitAllPatternExpression(Collection aliasesOrIndices) { + return aliasesOrIndices != null + && aliasesOrIndices.size() == 1 + && Metadata.ALL.equals(aliasesOrIndices.iterator().next().resource()); + } + /** * Identifies whether the array containing index names given as argument refers to all indices * The empty or null array identifies all indices @@ -1334,14 +1363,14 @@ private WildcardExpressionResolver() { * Returns all the indices, data streams, and aliases, considering the open/closed, system, and hidden context parameters. * Depending on the context, returns the names of the data streams themselves or their backing indices. 
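A small illustration of the two all-indices checks added above (illustrative inputs; Metadata.ALL is the existing "_all" constant, and ResolvedExpression is the wrapper sketched earlier):

Set<ResolvedExpression> empty = Set.of();
Set<ResolvedExpression> explicitAll = Set.of(new ResolvedExpression("_all"));
// Empty (or null) input still means "all indices"; the explicit form is a single "_all".
assert IndexNameExpressionResolver.isAllIndicesExpression(empty);
assert IndexNameExpressionResolver.isAllIndicesExpression(explicitAll);
assert IndexNameExpressionResolver.isAllIndicesExpression(Set.of(new ResolvedExpression("logs-app"))) == false;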
*/ - public static Collection resolveAll(Context context) { - List concreteIndices = resolveEmptyOrTrivialWildcard(context); + public static Collection resolveAll(Context context) { + List concreteIndices = resolveEmptyOrTrivialWildcard(context); if (context.includeDataStreams() == false && context.getOptions().ignoreAliases()) { return concreteIndices; } - Set resolved = new HashSet<>(concreteIndices.size()); + Set resolved = new HashSet<>(concreteIndices.size()); context.getState() .metadata() .getIndicesLookup() @@ -1386,10 +1415,10 @@ private static IndexMetadata.State excludeState(IndicesOptions options) { * The {@param context} provides the current time-snapshot view of cluster state, as well as conditions * on whether to consider alias, data stream, system, and hidden resources. */ - static Set matchWildcardToResources(Context context, String wildcardExpression) { + static Set matchWildcardToResources(Context context, String wildcardExpression) { assert isWildcard(wildcardExpression); final SortedMap indicesLookup = context.getState().getMetadata().getIndicesLookup(); - Set matchedResources = new HashSet<>(); + Set matchedResources = new HashSet<>(); // this applies an initial pre-filtering in the case where the expression is a common suffix wildcard, eg "test*" if (Regex.isSuffixMatchPattern(wildcardExpression)) { for (IndexAbstraction ia : filterIndicesLookupForSuffixWildcard(indicesLookup, wildcardExpression).values()) { @@ -1416,7 +1445,7 @@ private static void maybeAddToResult( Context context, String wildcardExpression, IndexAbstraction indexAbstraction, - Set matchedResources + Set matchedResources ) { if (shouldExpandToIndexAbstraction(context, wildcardExpression, indexAbstraction)) { matchedResources.addAll(expandToOpenClosed(context, indexAbstraction)); @@ -1475,20 +1504,20 @@ private static Map filterIndicesLookupForSuffixWildcar * Data streams and aliases are interpreted to refer to multiple indices, * then all index resources are filtered by their open/closed status. 
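In summary, the expansion below maps one abstraction to resources as follows (read from the method body; the gating of the failure-index loop is partially elided in this excerpt):

// alias        + preserveAliases     -> the alias name itself
// data stream  + preserveDataStreams -> the data stream name itself
// anything else                      -> each concrete index whose state is not excluded,
//                                       with failure-store indices added under the same
//                                       state filter when the context includes them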
*/ - private static Set expandToOpenClosed(Context context, IndexAbstraction indexAbstraction) { + private static Set expandToOpenClosed(Context context, IndexAbstraction indexAbstraction) { final IndexMetadata.State excludeState = excludeState(context.getOptions()); - Set resources = new HashSet<>(); + Set resources = new HashSet<>(); if (context.isPreserveAliases() && indexAbstraction.getType() == Type.ALIAS) { - resources.add(indexAbstraction.getName()); + resources.add(new ResolvedExpression(indexAbstraction.getName())); } else if (context.isPreserveDataStreams() && indexAbstraction.getType() == Type.DATA_STREAM) { - resources.add(indexAbstraction.getName()); + resources.add(new ResolvedExpression(indexAbstraction.getName())); } else { if (shouldIncludeRegularIndices(context.getOptions())) { for (int i = 0, n = indexAbstraction.getIndices().size(); i < n; i++) { Index index = indexAbstraction.getIndices().get(i); IndexMetadata indexMetadata = context.state.metadata().index(index); if (indexMetadata.getState() != excludeState) { - resources.add(index.getName()); + resources.add(new ResolvedExpression(index.getName())); } } } @@ -1498,7 +1527,7 @@ private static Set expandToOpenClosed(Context context, IndexAbstraction Index index = dataStream.getFailureIndices().getIndices().get(i); IndexMetadata indexMetadata = context.state.metadata().index(index); if (indexMetadata.getState() != excludeState) { - resources.add(index.getName()); + resources.add(new ResolvedExpression(index.getName())); } } } @@ -1506,20 +1535,27 @@ private static Set expandToOpenClosed(Context context, IndexAbstraction return resources; } - private static List resolveEmptyOrTrivialWildcard(Context context) { + private static List resolveEmptyOrTrivialWildcard(Context context) { final String[] allIndices = resolveEmptyOrTrivialWildcardToAllIndices(context.getOptions(), context.getState().metadata()); if (context.systemIndexAccessLevel == SystemIndexAccessLevel.ALL) { - return List.of(allIndices); + List result = new ArrayList<>(allIndices.length); + for (int i = 0; i < allIndices.length; i++) { + result.add(new ResolvedExpression(allIndices[i])); + } + return result; } else { return resolveEmptyOrTrivialWildcardWithAllowedSystemIndices(context, allIndices); } } - private static List resolveEmptyOrTrivialWildcardWithAllowedSystemIndices(Context context, String[] allIndices) { - List filteredIndices = new ArrayList<>(allIndices.length); + private static List resolveEmptyOrTrivialWildcardWithAllowedSystemIndices( + Context context, + String[] allIndices + ) { + List filteredIndices = new ArrayList<>(allIndices.length); for (int i = 0; i < allIndices.length; i++) { if (shouldIncludeIndexAbstraction(context, allIndices[i])) { - filteredIndices.add(allIndices[i]); + filteredIndices.add(new ResolvedExpression(allIndices[i])); } } return filteredIndices; diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/InferenceFieldMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/InferenceFieldMetadata.java index 271c60e829a87..8917d5a9cbbb5 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/InferenceFieldMetadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/InferenceFieldMetadata.java @@ -9,6 +9,7 @@ package org.elasticsearch.cluster.metadata; +import org.elasticsearch.TransportVersions; import org.elasticsearch.cluster.Diff; import org.elasticsearch.cluster.SimpleDiffable; import org.elasticsearch.common.io.stream.StreamInput; @@ -23,8 +24,6 @@ import 
java.util.List; import java.util.Objects; -import static org.elasticsearch.TransportVersions.SEMANTIC_TEXT_SEARCH_INFERENCE_ID; - /** * Contains inference field data for fields. * As inference is done in the coordinator node to avoid re-doing it at shard / replica level, the coordinator needs to check for the need @@ -56,7 +55,7 @@ public InferenceFieldMetadata(String name, String inferenceId, String searchInfe public InferenceFieldMetadata(StreamInput input) throws IOException { this.name = input.readString(); this.inferenceId = input.readString(); - if (input.getTransportVersion().onOrAfter(SEMANTIC_TEXT_SEARCH_INFERENCE_ID)) { + if (input.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { this.searchInferenceId = input.readString(); } else { this.searchInferenceId = this.inferenceId; @@ -68,7 +67,7 @@ public InferenceFieldMetadata(StreamInput input) throws IOException { public void writeTo(StreamOutput out) throws IOException { out.writeString(name); out.writeString(inferenceId); - if (out.getTransportVersion().onOrAfter(SEMANTIC_TEXT_SEARCH_INFERENCE_ID)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { out.writeString(searchInferenceId); } out.writeStringArray(sourceFields); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamService.java index 5dbf4da6f376f..0de87c7226380 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamService.java @@ -261,11 +261,16 @@ static ClusterState createDataStream( // This is not a problem as both have different prefixes (`.ds-` vs `.fs-`) and both will be using the same `generation` field // when rolling over in the future. final long initialGeneration = 1; + ResettableValue dataStreamOptionsTemplate = isSystem + ? MetadataIndexTemplateService.resolveDataStreamOptions(template, systemDataStreamDescriptor.getComponentTemplates()) + : MetadataIndexTemplateService.resolveDataStreamOptions(template, metadata.componentTemplates()); + final DataStreamOptions dataStreamOptions = dataStreamOptionsTemplate.mapAndGet(DataStreamOptions.Template::toDataStreamOptions); + var isFailureStoreEnabled = dataStreamOptions != null && dataStreamOptions.isFailureStoreEnabled(); // If we need to create a failure store, do so first. Do not reroute during the creation since we will do // that as part of creating the backing index if required. 
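// Editor's sketch, not part of the change: why null-safe handling suffices here.
// ResettableValue.mapAndGet (introduced later in this PR) returns null for both the
// undefined and the reset state, so in those cases dataStreamOptions above is null and
// isFailureStoreEnabled evaluates to false, i.e. "no options" means "no failure store".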
IndexMetadata failureStoreIndex = null; - if (template.getDataStreamTemplate().hasFailureStore() && initializeFailureStore) { + if (isFailureStoreEnabled && initializeFailureStore) { if (isSystem) { throw new IllegalArgumentException("Failure stores are not supported on system data streams"); } @@ -303,7 +308,7 @@ static ClusterState createDataStream( } assert writeIndex != null; assert writeIndex.mapping() != null : "no mapping found for backing index [" + writeIndex.getIndex().getName() + "]"; - assert template.getDataStreamTemplate().hasFailureStore() == false || initializeFailureStore == false || failureStoreIndex != null + assert isFailureStoreEnabled == false || initializeFailureStore == false || failureStoreIndex != null : "failure store should have an initial index"; assert failureStoreIndex == null || failureStoreIndex.mapping() != null : "no mapping found for failure store [" + failureStoreIndex.getIndex().getName() + "]"; @@ -329,7 +334,7 @@ static ClusterState createDataStream( template.getDataStreamTemplate().isAllowCustomRouting(), indexMode, lifecycle == null && isDslOnlyMode ? DataStreamLifecycle.DEFAULT : lifecycle, - template.getDataStreamTemplate().hasFailureStore() ? DataStreamOptions.FAILURE_STORE_ENABLED : DataStreamOptions.EMPTY, + dataStreamOptions, new DataStream.DataStreamIndices(DataStream.BACKING_INDEX_PREFIX, dsBackingIndices, false, null), // If the failure store shouldn't be initialized on data stream creation, we're marking it for "lazy rollover", which will // initialize the failure store on first write. diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java index 52e4d75ac5116..75c2c06f36c8e 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java @@ -1591,6 +1591,7 @@ static void validateCloneIndex( private static final Set UNMODIFIABLE_SETTINGS_DURING_RESIZE = Set.of( IndexSettings.MODE.getKey(), SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), + IndexSettings.RECOVERY_USE_SYNTHETIC_SOURCE_SETTING.getKey(), IndexSortConfig.INDEX_SORT_FIELD_SETTING.getKey(), IndexSortConfig.INDEX_SORT_ORDER_SETTING.getKey(), IndexSortConfig.INDEX_SORT_MODE_SETTING.getKey(), diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java index 3878a3329b634..7f8b87d2d3f48 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java @@ -312,12 +312,7 @@ public ClusterState addComponentTemplate( } } - final Template finalTemplate = new Template( - finalSettings, - wrappedMappings, - template.template().aliases(), - template.template().lifecycle() - ); + final Template finalTemplate = Template.builder(template.template()).settings(finalSettings).mappings(wrappedMappings).build(); final ComponentTemplate finalComponentTemplate = new ComponentTemplate( finalTemplate, template.version(), @@ -348,6 +343,7 @@ public ClusterState addComponentTemplate( composableTemplate, globalRetentionSettings.get() ); + validateDataStreamOptions(tempStateWithComponentTemplateAdded.metadata(), composableTemplateName, composableTemplate); 
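// Editor's note: the validateDataStreamOptions call above pairs with the validation
// added further down in this file; a template whose resolved data stream options are
// non-null but that lacks a data_stream definition is rejected with "specifies data
// stream options that can only be used in combination with a data stream".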
validateIndexTemplateV2(composableTemplateName, composableTemplate, tempStateWithComponentTemplateAdded); } catch (Exception e) { if (validationFailure == null) { @@ -629,7 +625,7 @@ public ClusterState addIndexTemplateV2( // adjusted (to add _doc) and it should be validated CompressedXContent mappings = innerTemplate.mappings(); CompressedXContent wrappedMappings = wrapMappingsIfNecessary(mappings, xContentRegistry); - final Template finalTemplate = new Template(finalSettings, wrappedMappings, innerTemplate.aliases(), innerTemplate.lifecycle()); + final Template finalTemplate = Template.builder(innerTemplate).settings(finalSettings).mappings(wrappedMappings).build(); finalIndexTemplate = template.toBuilder().template(finalTemplate).build(); } @@ -690,7 +686,8 @@ public static Map> v2TemplateOverlaps( return overlaps; } - private void validateIndexTemplateV2(String name, ComposableIndexTemplate indexTemplate, ClusterState currentState) { + // Visibility for testing + void validateIndexTemplateV2(String name, ComposableIndexTemplate indexTemplate, ClusterState currentState) { // Workaround for the fact that start_time and end_time are injected by the MetadataCreateDataStreamService upon creation, // but when validating templates that create data streams the MetadataCreateDataStreamService isn't used. var finalTemplate = indexTemplate.template(); @@ -726,6 +723,7 @@ private void validateIndexTemplateV2(String name, ComposableIndexTemplate indexT validate(name, templateToValidate); validateDataStreamsStillReferenced(currentState, name, templateToValidate); validateLifecycle(currentState.metadata(), name, templateToValidate, globalRetentionSettings.get()); + validateDataStreamOptions(currentState.metadata(), name, templateToValidate); if (templateToValidate.isDeprecated() == false) { validateUseOfDeprecatedComponentTemplates(name, templateToValidate, currentState.metadata().componentTemplates()); @@ -819,6 +817,20 @@ static void validateLifecycle( } } + // Visible for testing + static void validateDataStreamOptions(Metadata metadata, String indexTemplateName, ComposableIndexTemplate template) { + ResettableValue dataStreamOptions = resolveDataStreamOptions(template, metadata.componentTemplates()); + if (dataStreamOptions.get() != null) { + if (template.getDataStreamTemplate() == null) { + throw new IllegalArgumentException( + "index template [" + + indexTemplateName + + "] specifies data stream options that can only be used in combination with a data stream" + ); + } + } + } + /** * Validate that by changing or adding {@code newTemplate}, there are * no unreferenced data streams. 
Note that this scenario is still possible @@ -1561,7 +1573,7 @@ static List> resolveAliases( public static DataStreamLifecycle resolveLifecycle(final Metadata metadata, final String templateName) { final ComposableIndexTemplate template = metadata.templatesV2().get(templateName); assert template != null - : "attempted to resolve settings for a template [" + templateName + "] that did not exist in the cluster state"; + : "attempted to resolve lifecycle for a template [" + templateName + "] that did not exist in the cluster state"; if (template == null) { return null; } @@ -1653,6 +1665,81 @@ public static DataStreamLifecycle composeDataLifecycles(List} object + */ + public static ResettableValue resolveDataStreamOptions(final Metadata metadata, final String templateName) { + final ComposableIndexTemplate template = metadata.templatesV2().get(templateName); + assert template != null + : "attempted to resolve data stream options for a template [" + templateName + "] that did not exist in the cluster state"; + if (template == null) { + return ResettableValue.undefined(); + } + return resolveDataStreamOptions(template, metadata.componentTemplates()); + } + + /** + * Resolve the provided v2 template and component templates into a {@link ResettableValue} object + */ + public static ResettableValue resolveDataStreamOptions( + ComposableIndexTemplate template, + Map componentTemplates + ) { + Objects.requireNonNull(template, "attempted to resolve data stream for a null template"); + Objects.requireNonNull(componentTemplates, "attempted to resolve data stream options with null component templates"); + + List> dataStreamOptionsList = new ArrayList<>(); + for (String componentTemplateName : template.composedOf()) { + if (componentTemplates.containsKey(componentTemplateName) == false) { + continue; + } + ResettableValue dataStreamOptions = componentTemplates.get(componentTemplateName) + .template() + .resettableDataStreamOptions(); + if (dataStreamOptions.isDefined()) { + dataStreamOptionsList.add(dataStreamOptions); + } + } + // The actual index template's data stream options have the highest precedence. + if (template.template() != null && template.template().resettableDataStreamOptions().isDefined()) { + dataStreamOptionsList.add(template.template().resettableDataStreamOptions()); + } + return composeDataStreamOptions(dataStreamOptionsList); + } + + /** + * This method composes a series of data streams options to a final one. Since currently the data stream options + * contains only the failure store configuration which also contains only one field, the composition is a bit trivial. + * But we introduce the mechanics that will help extend it really easily. 
+ * @param dataStreamOptionsList a sorted list of data stream options in the order that they will be composed + * @return the final data stream option configuration + */ + public static ResettableValue<DataStreamOptions.Template> composeDataStreamOptions( + List<ResettableValue<DataStreamOptions.Template>> dataStreamOptionsList + ) { + if (dataStreamOptionsList.isEmpty()) { + return ResettableValue.undefined(); + } + DataStreamOptions.Template.Builder builder = null; + for (ResettableValue<DataStreamOptions.Template> current : dataStreamOptionsList) { + if (current.isDefined() == false) { + continue; + } + if (current.shouldReset()) { + builder = null; + } else { + DataStreamOptions.Template currentTemplate = current.get(); + if (builder == null) { + builder = DataStreamOptions.Template.builder(currentTemplate); + } else { + // Currently the failure store has only one field that needs to be defined, so the composition is trivial + builder.updateFailureStore(currentTemplate.failureStore()); + } + } + } + return builder == null ? ResettableValue.undefined() : ResettableValue.create(builder.build()); + } + /** * Given a state and a composable template, validate that the final composite template * generated by the composable template and all of its component templates contains valid diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/ResettableValue.java b/server/src/main/java/org/elasticsearch/cluster/metadata/ResettableValue.java new file mode 100644 index 0000000000000..4f38d2b8386a6 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/ResettableValue.java @@ -0,0 +1,216 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.cluster.metadata; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.xcontent.ToXContent; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Map; +import java.util.Objects; +import java.util.function.BiFunction; +import java.util.function.Function; + +/** + * This class holds a value of type {@code T} that can be in one of 3 states: + * - It has a concrete value, or + * - It is missing, or + * - It is meant to reset any other value when it is composed with it. + * It is mainly used in template composition to capture the case when the user wishes to reset any previous values.
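A worked example of composeDataStreamOptions from earlier in this hunk, exercising the states this class defines (values illustrative; the classes are the ones introduced in this PR):

import java.util.List;

// undefined entries are skipped, reset clears what was accumulated so far, and later
// concrete values win through DataStreamFailureStore.Template::merge.
var on = ResettableValue.create(new DataStreamOptions.Template(
    ResettableValue.create(new DataStreamFailureStore.Template(ResettableValue.create(true)))));
var wipe = ResettableValue.<DataStreamOptions.Template>reset();

assert MetadataIndexTemplateService.composeDataStreamOptions(List.of(on, wipe)).isDefined() == false;
assert MetadataIndexTemplateService.composeDataStreamOptions(List.of(wipe, on)).get() != null;

Note that resolveDataStreamOptions appends the index template's own options after all component templates, so, per the comment in that method, the index template has the highest precedence during this composition.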
+ * @param <T> + */ +public class ResettableValue<T> { + private static final ResettableValue<?> RESET = new ResettableValue<>(true, null); + private static final ResettableValue<?> UNDEFINED = new ResettableValue<>(false, null); + private static final String DISPLAY_RESET_VALUES = "display_reset"; + private static final Map<String, String> HIDE_RESET_VALUES_PARAMS = Map.of(DISPLAY_RESET_VALUES, "false"); + + private final T value; + private final boolean isDefined; + + /** + * @return the reset state, meaning that this value is explicitly requested to be reset + */ + public static <T> ResettableValue<T> reset() { + @SuppressWarnings("unchecked") + ResettableValue<T> t = (ResettableValue<T>) RESET; + return t; + } + + /** + * @return the undefined state, meaning that this value has not been specified + */ + public static <T> ResettableValue<T> undefined() { + @SuppressWarnings("unchecked") + ResettableValue<T> t = (ResettableValue<T>) UNDEFINED; + return t; + } + + /** + * Wraps a value; if the value is null, it returns {@link #undefined()} + */ + public static <T> ResettableValue<T> create(T value) { + if (value == null) { + return undefined(); + } + return new ResettableValue<>(true, value); + } + + private ResettableValue(boolean isDefined, T value) { + this.isDefined = isDefined; + this.value = value; + } + + /** + * @return true if the state of this is reset + */ + public boolean shouldReset() { + return isDefined && value == null; + } + + /** + * @return true when the value is defined, either with a concrete value or reset. + */ + public boolean isDefined() { + return isDefined; + } + + /** + * @return the concrete value or null if it is in undefined or reset states. + */ + @Nullable + public T get() { + return value; + } + + /** + * Writes a single optional explicitly nullable value. This method is in direct relation with the + * {@link #read(StreamInput, Writeable.Reader)} which reads the respective value. It's the + * responsibility of the caller to preserve order of the fields and their backwards compatibility. + * + * @throws IOException + */ + static <T> void write(StreamOutput out, ResettableValue<T> value, Writeable.Writer<T> writer) throws IOException { + out.writeBoolean(value.isDefined); + if (value.isDefined) { + out.writeBoolean(value.shouldReset()); + if (value.shouldReset() == false) { + writer.write(out, value.get()); + } + } + } + + /** + * Reads a single optional and explicitly nullable value. This method is in direct relation with the + * {@link #write(StreamOutput, ResettableValue, Writeable.Writer)} which writes the respective value. It's the + * responsibility of the caller to preserve order of the fields and their backwards compatibility. + * + * @throws IOException + */ + @Nullable + static <T> ResettableValue<T> read(StreamInput in, Writeable.Reader<T> reader) throws IOException { + boolean isDefined = in.readBoolean(); + if (isDefined == false) { + return ResettableValue.undefined(); + } + boolean shouldReset = in.readBoolean(); + if (shouldReset) { + return ResettableValue.reset(); + } + T value = reader.read(in); + return ResettableValue.create(value); + } + + /** + * Gets the value and applies the function {@code f} when the value is not null. Slightly more efficient than + * this.map(f).get().
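A round-trip sketch of the write/read pair above (assumes test code in the same package, since both methods are package-private; BytesStreamOutput is the existing Elasticsearch stream implementation):

BytesStreamOutput out = new BytesStreamOutput();
ResettableValue.write(out, ResettableValue.create(true), StreamOutput::writeBoolean);        // [true][false][payload]
ResettableValue.write(out, ResettableValue.<Boolean>reset(), StreamOutput::writeBoolean);     // [true][true]
ResettableValue.write(out, ResettableValue.<Boolean>undefined(), StreamOutput::writeBoolean); // [false]

StreamInput in = out.bytes().streamInput();
assert ResettableValue.read(in, StreamInput::readBoolean).get();                 // concrete value survives
assert ResettableValue.read(in, StreamInput::readBoolean).shouldReset();         // reset survives
assert ResettableValue.read(in, StreamInput::readBoolean).isDefined() == false;  // undefined survives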
+ */ + public <U> U mapAndGet(Function<T, U> f) { + if (isDefined() == false || shouldReset()) { + return null; + } else { + return f.apply(value); + } + } + + public <U> ResettableValue<U> map(Function<T, U> mapper) { + Objects.requireNonNull(mapper); + if (isDefined == false) { + return ResettableValue.undefined(); + } + if (shouldReset()) { + return reset(); + } + return ResettableValue.create(mapper.apply(value)); + } + + /** + * Merges the values of two ResettableValues when they are defined, using the provided mergeFunction. + */ + public static <T> ResettableValue<T> merge(ResettableValue<T> initial, ResettableValue<T> update, BiFunction<T, T, T> mergeFunction) { + if (update.shouldReset()) { + return undefined(); + } + if (update.isDefined() == false) { + return initial; + } + if (initial.isDefined() == false || initial.shouldReset()) { + return update; + } + // Because we checked that both values are defined and not in the reset state, we can directly apply the merge function. + return ResettableValue.create(mergeFunction.apply(initial.value, update.value)); + } + + public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params, String field) throws IOException { + return toXContent(builder, params, field, Function.identity()); + } + + public <U> XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params, String field, Function<T, U> transformValue) + throws IOException { + if (isDefined) { + if (value != null) { + builder.field(field, transformValue.apply(value)); + } else if (ResettableValue.shouldDisplayResetValue(params)) { + builder.nullField(field); + } + } + return builder; + } + + public static boolean shouldDisplayResetValue(ToXContent.Params params) { + return params.paramAsBoolean(DISPLAY_RESET_VALUES, true); + } + + public static ToXContent.Params hideResetValues(ToXContent.Params params) { + return new ToXContent.DelegatingMapParams(HIDE_RESET_VALUES_PARAMS, params); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + ResettableValue<?> that = (ResettableValue<?>) o; + return isDefined == that.isDefined && Objects.equals(value, that.value); + } + + @Override + public int hashCode() { + return Objects.hash(value, isDefined); + } + + @Override + public String toString() { + return "ResettableValue{" + "value=" + value + ", isDefined=" + isDefined + '}'; + } +} diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/Template.java b/server/src/main/java/org/elasticsearch/cluster/metadata/Template.java index 0a9e79284ced6..7d354768ca987 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/Template.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/Template.java @@ -47,12 +47,19 @@ public class Template implements SimpleDiffable