diff --git a/.buildkite/pipelines/intake.yml b/.buildkite/pipelines/intake.yml index 6e15d64154960..9efb9c8b498aa 100644 --- a/.buildkite/pipelines/intake.yml +++ b/.buildkite/pipelines/intake.yml @@ -56,7 +56,7 @@ steps: timeout_in_minutes: 300 matrix: setup: - BWC_VERSION: ["8.16.2", "8.17.1", "8.18.0", "9.0.0"] + BWC_VERSION: ["8.16.3", "8.17.1", "8.18.0", "9.0.0"] agents: provider: gcp image: family/elasticsearch-ubuntu-2004 diff --git a/.buildkite/pipelines/periodic-packaging.yml b/.buildkite/pipelines/periodic-packaging.yml index abd11068e7a65..b1e5a7bf933c9 100644 --- a/.buildkite/pipelines/periodic-packaging.yml +++ b/.buildkite/pipelines/periodic-packaging.yml @@ -287,8 +287,8 @@ steps: env: BWC_VERSION: 8.15.5 - - label: "{{matrix.image}} / 8.16.2 / packaging-tests-upgrade" - command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.16.2 + - label: "{{matrix.image}} / 8.16.3 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.16.3 timeout_in_minutes: 300 matrix: setup: @@ -301,7 +301,7 @@ steps: machineType: custom-16-32768 buildDirectory: /dev/shm/bk env: - BWC_VERSION: 8.16.2 + BWC_VERSION: 8.16.3 - label: "{{matrix.image}} / 8.17.1 / packaging-tests-upgrade" command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.17.1 diff --git a/.buildkite/pipelines/periodic-platform-support.yml b/.buildkite/pipelines/periodic-platform-support.yml index c5846a763f5e8..ea0d7b13b55b4 100644 --- a/.buildkite/pipelines/periodic-platform-support.yml +++ b/.buildkite/pipelines/periodic-platform-support.yml @@ -63,6 +63,7 @@ steps: image: - almalinux-8-aarch64 - ubuntu-2004-aarch64 + - ubuntu-2404-aarch64 GRADLE_TASK: - checkPart1 - checkPart2 diff --git a/.buildkite/pipelines/periodic.yml b/.buildkite/pipelines/periodic.yml index f2d169cd2b30d..4c593bae62d7a 100644 --- a/.buildkite/pipelines/periodic.yml +++ b/.buildkite/pipelines/periodic.yml @@ -306,8 +306,8 @@ steps: - signal_reason: agent_stop limit: 3 - - label: 8.16.2 / bwc - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.16.2#bwcTest + - label: 8.16.3 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.16.3#bwcTest timeout_in_minutes: 300 agents: provider: gcp @@ -316,7 +316,7 @@ steps: buildDirectory: /dev/shm/bk preemptible: true env: - BWC_VERSION: 8.16.2 + BWC_VERSION: 8.16.3 retry: automatic: - exit_status: "-1" @@ -448,7 +448,7 @@ steps: setup: ES_RUNTIME_JAVA: - openjdk21 - BWC_VERSION: ["8.16.2", "8.17.1", "8.18.0", "9.0.0"] + BWC_VERSION: ["8.16.3", "8.17.1", "8.18.0", "9.0.0"] agents: provider: gcp image: family/elasticsearch-ubuntu-2004 @@ -490,7 +490,7 @@ steps: ES_RUNTIME_JAVA: - openjdk21 - openjdk23 - BWC_VERSION: ["8.16.2", "8.17.1", "8.18.0", "9.0.0"] + BWC_VERSION: ["8.16.3", "8.17.1", "8.18.0", "9.0.0"] agents: provider: gcp image: family/elasticsearch-ubuntu-2004 diff --git a/.ci/bwcVersions b/.ci/bwcVersions index 3cb983373138f..cf12ee8c15419 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -15,7 +15,7 @@ BWC_VERSION: - "8.13.4" - "8.14.3" - "8.15.5" - - "8.16.2" + - "8.16.3" - "8.17.1" - "8.18.0" - "9.0.0" diff --git a/.ci/snapshotBwcVersions b/.ci/snapshotBwcVersions index e05c0774c9819..68c6ad5601546 100644 --- a/.ci/snapshotBwcVersions +++ b/.ci/snapshotBwcVersions @@ -1,5 +1,5 @@ BWC_VERSION: - - "8.16.2" + - "8.16.3" - "8.17.1" - "8.18.0" - "9.0.0" diff --git 
a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/DockerBase.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/DockerBase.java index d54eb798ce783..985c98bcd7883 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/DockerBase.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/DockerBase.java @@ -22,7 +22,7 @@ public enum DockerBase { // Chainguard based wolfi image with latest jdk // This is usually updated via renovatebot // spotless:off - WOLFI("docker.elastic.co/wolfi/chainguard-base:latest@sha256:1b51ff6dba78c98d3e02b0cd64a8ce3238c7a40408d21e3af12a329d44db6f23", + WOLFI("docker.elastic.co/wolfi/chainguard-base:latest@sha256:bfdeddb33330a281950c2a54adef991dbbe6a42832bc505d13b11beaf50ae73f", "-wolfi", "apk" ), diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditPrecommitPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditPrecommitPlugin.java index e45a1d3dd25b1..7046a22204efa 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditPrecommitPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditPrecommitPlugin.java @@ -16,12 +16,14 @@ import org.gradle.api.Task; import org.gradle.api.artifacts.Configuration; import org.gradle.api.artifacts.component.ModuleComponentIdentifier; +import org.gradle.api.file.FileCollection; import org.gradle.api.tasks.TaskProvider; import java.io.File; import java.nio.file.Path; import static org.elasticsearch.gradle.internal.util.DependenciesUtils.createFileCollectionFromNonTransitiveArtifactsView; +import static org.elasticsearch.gradle.internal.util.DependenciesUtils.thirdPartyDependenciesView; import static org.elasticsearch.gradle.internal.util.ParamsUtils.loadBuildParams; public class ThirdPartyAuditPrecommitPlugin extends PrecommitPlugin { @@ -47,7 +49,6 @@ public TaskProvider<? extends Task> createTask(Project project) { project.getDependencies().add(JDK_JAR_HELL_CONFIG_NAME, elasticsearchCoreProject); } } - TaskProvider<ExportElasticsearchBuildResourcesTask> resourcesTask = project.getTasks() .register("thirdPartyAuditResources", ExportElasticsearchBuildResourcesTask.class); Path resourcesDir = project.getBuildDir().toPath().resolve("third-party-audit-config"); @@ -59,9 +60,11 @@ public TaskProvider<? extends Task> createTask(Project project) { // usually only one task is created.
but this construct makes our integTests easier to set up project.getTasks().withType(ThirdPartyAuditTask.class).configureEach(t -> { Configuration runtimeConfiguration = project.getConfigurations().getByName("runtimeClasspath"); + FileCollection runtimeThirdParty = thirdPartyDependenciesView(runtimeConfiguration); Configuration compileOnly = project.getConfigurations() .getByName(CompileOnlyResolvePlugin.RESOLVEABLE_COMPILE_ONLY_CONFIGURATION_NAME); - t.setClasspath(runtimeConfiguration.plus(compileOnly)); + FileCollection compileOnlyThirdParty = thirdPartyDependenciesView(compileOnly); + t.getThirdPartyClasspath().from(runtimeThirdParty, compileOnlyThirdParty); t.getJarsToScan() .from( createFileCollectionFromNonTransitiveArtifactsView( @@ -78,7 +81,7 @@ public TaskProvider<? extends Task> createTask(Project project) { t.getJavaHome().set(buildParams.flatMap(params -> params.getRuntimeJavaHome()).map(File::getPath)); t.setSignatureFile(resourcesDir.resolve("forbidden/third-party-audit.txt").toFile()); t.getJdkJarHellClasspath().from(jdkJarHellConfig); - t.getForbiddenAPIsClasspath().from(project.getConfigurations().getByName("forbiddenApisCliJar").plus(compileOnly)); + t.getForbiddenAPIsClasspath().from(project.getConfigurations().getByName("forbiddenApisCliJar").plus(compileOnlyThirdParty)); }); return audit; } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditTask.java index 442797775de2f..59ba9bae0a57d 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditTask.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditTask.java @@ -17,7 +17,6 @@ import org.gradle.api.JavaVersion; import org.gradle.api.file.ArchiveOperations; import org.gradle.api.file.ConfigurableFileCollection; -import org.gradle.api.file.FileCollection; import org.gradle.api.file.FileSystemOperations; import org.gradle.api.file.FileTree; import org.gradle.api.file.ProjectLayout; @@ -96,8 +95,6 @@ public abstract class ThirdPartyAuditTask extends DefaultTask { private final ProjectLayout projectLayout; - private FileCollection classpath; - @Inject public ThirdPartyAuditTask( ArchiveOperations archiveOperations, @@ -198,9 +195,7 @@ public Set<String> getMissingClassExcludes() { public abstract Property<JavaVersion> getRuntimeJavaVersion(); @Classpath - public FileCollection getClasspath() { - return classpath; - } + public abstract ConfigurableFileCollection getThirdPartyClasspath(); @TaskAction public void runThirdPartyAudit() throws IOException { @@ -345,7 +340,7 @@ private String runForbiddenAPIsCli() throws IOException { if (javaHome.isPresent()) { spec.setExecutable(javaHome.get() + "/bin/java"); } - spec.classpath(getForbiddenAPIsClasspath(), classpath); + spec.classpath(getForbiddenAPIsClasspath(), getThirdPartyClasspath()); // Enable explicitly for each release as appropriate. Just JDK 20/21/22/23 for now, and just the vector module.
if (isJavaVersion(VERSION_20) || isJavaVersion(VERSION_21) || isJavaVersion(VERSION_22) || isJavaVersion(VERSION_23)) { spec.jvmArgs("--add-modules", "jdk.incubator.vector"); @@ -383,7 +378,7 @@ private boolean isJavaVersion(JavaVersion version) { private Set<String> runJdkJarHellCheck() throws IOException { ByteArrayOutputStream standardOut = new ByteArrayOutputStream(); ExecResult execResult = execOperations.javaexec(spec -> { - spec.classpath(getJdkJarHellClasspath(), classpath); + spec.classpath(getJdkJarHellClasspath(), getThirdPartyClasspath()); spec.getMainClass().set(JDK_JAR_HELL_MAIN_CLASS); spec.args(getJarExpandDir()); spec.setIgnoreExitValue(true); @@ -402,8 +397,4 @@ private Set<String> runJdkJarHellCheck() throws IOException { return new TreeSet<>(Arrays.asList(jdkJarHellCheckList.split("\\r?\\n"))); } - public void setClasspath(FileCollection classpath) { - this.classpath = classpath; - } - } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/util/DependenciesUtils.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/util/DependenciesUtils.java index 9080f62f19937..5d7386e2c2150 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/util/DependenciesUtils.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/util/DependenciesUtils.java @@ -9,12 +9,16 @@ package org.elasticsearch.gradle.internal.util; +import com.github.jengelman.gradle.plugins.shadow.ShadowBasePlugin; + import org.gradle.api.artifacts.Configuration; import org.gradle.api.artifacts.ResolvableDependencies; import org.gradle.api.artifacts.component.ComponentIdentifier; +import org.gradle.api.artifacts.component.ProjectComponentIdentifier; import org.gradle.api.artifacts.result.ResolvedComponentResult; import org.gradle.api.artifacts.result.ResolvedDependencyResult; import org.gradle.api.file.FileCollection; +import org.gradle.api.provider.Provider; import org.gradle.api.specs.AndSpec; import org.gradle.api.specs.Spec; @@ -29,7 +33,7 @@ public static FileCollection createFileCollectionFromNonTransitiveArtifactsView( ) { ResolvableDependencies incoming = configuration.getIncoming(); return incoming.artifactView(viewConfiguration -> { - Set<ComponentIdentifier> firstLevelDependencyComponents = incoming.getResolutionResult() + Provider<Set<ComponentIdentifier>> firstLevelDependencyComponents = incoming.getResolutionResult() .getRootComponent() .map( rootComponent -> rootComponent.getDependencies() .stream() .filter(dependency -> dependency instanceof ResolvedDependencyResult) .map(dependency -> (ResolvedDependencyResult) dependency) .filter(dependency -> dependency.getSelected() instanceof ResolvedComponentResult) .map(dependency -> dependency.getSelected().getId()) .collect(Collectors.toSet()) - ) - .get(); + ); viewConfiguration.componentFilter( - new AndSpec<>(identifier -> firstLevelDependencyComponents.contains(identifier), componentFilter) + new AndSpec<>(identifier -> firstLevelDependencyComponents.get().contains(identifier), componentFilter) ); }).getFiles(); } + /** + * This method gives us an artifact view of a configuration that filters out all + * project dependencies that are not shadowed jars. + * Basically a third-party-only view of the dependency tree.
+ */ + public static FileCollection thirdPartyDependenciesView(Configuration configuration) { + ResolvableDependencies incoming = configuration.getIncoming(); + return incoming.artifactView(v -> { + // resolve componentIdentifier for all shadowed project dependencies + Provider<Set<ComponentIdentifier>> shadowedDependencies = incoming.getResolutionResult() + .getRootComponent() + .map( + root -> root.getDependencies() + .stream() + .filter(dep -> dep instanceof ResolvedDependencyResult) + .map(dep -> (ResolvedDependencyResult) dep) + .filter(dep -> dep.getResolvedVariant().getDisplayName() == ShadowBasePlugin.COMPONENT_NAME) + .filter(dep -> dep.getSelected() instanceof ResolvedComponentResult) + .map(dep -> dep.getSelected().getId()) + .collect(Collectors.toSet()) + ); + // filter out project dependencies if they are not a shadowed dependency + v.componentFilter(i -> (i instanceof ProjectComponentIdentifier == false || shadowedDependencies.get().contains(i))); + }).getFiles(); + } } diff --git a/build-tools-internal/src/main/resources/changelog-schema.json b/build-tools-internal/src/main/resources/changelog-schema.json index 451701d74d690..9692af7adc5e6 100644 --- a/build-tools-internal/src/main/resources/changelog-schema.json +++ b/build-tools-internal/src/main/resources/changelog-schema.json @@ -279,6 +279,7 @@ "compatibilityChangeArea": { "type": "string", "enum": [ + "Aggregations", "Analysis", "Authorization", "Cluster and node setting", @@ -295,6 +296,7 @@ "Painless", "REST API", "Rollup", + "Search", "System requirement", "Transform" ] } diff --git a/build-tools-internal/src/main/resources/forbidden/es-server-signatures.txt b/build-tools-internal/src/main/resources/forbidden/es-server-signatures.txt index a9da7995c2b36..53480a4a27b0b 100644 --- a/build-tools-internal/src/main/resources/forbidden/es-server-signatures.txt +++ b/build-tools-internal/src/main/resources/forbidden/es-server-signatures.txt @@ -155,10 +155,8 @@ org.elasticsearch.cluster.ClusterState#compatibilityVersions() @defaultMessage ClusterFeatures#nodeFeatures is for internal use only. Use FeatureService#clusterHasFeature to determine if a feature is present on the cluster. org.elasticsearch.cluster.ClusterFeatures#nodeFeatures() -@defaultMessage ClusterFeatures#allNodeFeatures is for internal use only. Use FeatureService#clusterHasFeature to determine if a feature is present on the cluster. -org.elasticsearch.cluster.ClusterFeatures#allNodeFeatures() @defaultMessage ClusterFeatures#clusterHasFeature is for internal use only. Use FeatureService#clusterHasFeature to determine if a feature is present on the cluster.
-org.elasticsearch.cluster.ClusterFeatures#clusterHasFeature(org.elasticsearch.features.NodeFeature) +org.elasticsearch.cluster.ClusterFeatures#clusterHasFeature(org.elasticsearch.cluster.node.DiscoveryNodes, org.elasticsearch.features.NodeFeature) @defaultMessage Do not construct these records outside the source files they are declared in org.elasticsearch.cluster.SnapshotsInProgress$ShardSnapshotStatus#<init>(java.lang.String, org.elasticsearch.cluster.SnapshotsInProgress$ShardState, org.elasticsearch.repositories.ShardGeneration, java.lang.String, org.elasticsearch.repositories.ShardSnapshotResult) diff --git a/docs/Versions.asciidoc b/docs/Versions.asciidoc index bdb0704fcd880..f2e61861bd3a6 100644 --- a/docs/Versions.asciidoc +++ b/docs/Versions.asciidoc @@ -9,6 +9,7 @@ include::{docs-root}/shared/versions/stack/{source_branch}.asciidoc[] :docker-repo: docker.elastic.co/elasticsearch/elasticsearch :docker-image: {docker-repo}:{version} +:docker-wolfi-image: {docker-repo}-wolfi:{version} :kib-docker-repo: docker.elastic.co/kibana/kibana :kib-docker-image: {kib-docker-repo}:{version} :plugin_url: https://artifacts.elastic.co/downloads/elasticsearch-plugins diff --git a/docs/changelog/116358.yaml b/docs/changelog/116358.yaml deleted file mode 100644 index 58b44a1e9bcf5..0000000000000 --- a/docs/changelog/116358.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 116358 -summary: Update Deberta tokenizer -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/116388.yaml b/docs/changelog/116388.yaml new file mode 100644 index 0000000000000..59cdafc9ec337 --- /dev/null +++ b/docs/changelog/116388.yaml @@ -0,0 +1,5 @@ +pr: 116388 +summary: Add support for partial shard results +area: EQL +type: enhancement +issues: [] diff --git a/docs/changelog/117153.yaml b/docs/changelog/117153.yaml deleted file mode 100644 index f7640c0a7ed6a..0000000000000 --- a/docs/changelog/117153.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 117153 -summary: "ESQL: fix the column position in errors" -area: ES|QL -type: bug -issues: [] diff --git a/docs/changelog/117989.yaml b/docs/changelog/117989.yaml new file mode 100644 index 0000000000000..e4967141b3ebd --- /dev/null +++ b/docs/changelog/117989.yaml @@ -0,0 +1,5 @@ +pr: 117989 +summary: ESQL Add esql hash function +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/118143.yaml b/docs/changelog/118143.yaml new file mode 100644 index 0000000000000..4dcbf4b4b6c2c --- /dev/null +++ b/docs/changelog/118143.yaml @@ -0,0 +1,5 @@ +pr: 118143 +summary: Infrastructure for assuming cluster features in the next major version +area: "Infra/Core" +type: feature +issues: [] diff --git a/docs/changelog/118380.yaml b/docs/changelog/118380.yaml deleted file mode 100644 index 8b26c871fb172..0000000000000 --- a/docs/changelog/118380.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 118380 -summary: Restore original "is within leaf" value in `SparseVectorFieldMapper` -area: Mapping -type: bug -issues: [] diff --git a/docs/changelog/118484.yaml b/docs/changelog/118484.yaml new file mode 100644 index 0000000000000..41db476a42523 --- /dev/null +++ b/docs/changelog/118484.yaml @@ -0,0 +1,14 @@ +pr: 118484 +summary: Remove date histogram boolean support +area: Aggregations +type: breaking +issues: [] +breaking: + title: Remove date histogram boolean support + area: Aggregations + details: Elasticsearch no longer allows running Date Histogram aggregations + over boolean fields. Instead, use Terms aggregation for boolean + fields.
+ impact: We expect the impact to be minimal, as this never produced good + results, and has been deprecated for years. + notable: false diff --git a/docs/changelog/118544.yaml b/docs/changelog/118544.yaml new file mode 100644 index 0000000000000..d59783c4e6194 --- /dev/null +++ b/docs/changelog/118544.yaml @@ -0,0 +1,5 @@ +pr: 118544 +summary: ESQL - Remove restrictions for disjunctions in full text functions +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/118617.yaml b/docs/changelog/118617.yaml new file mode 100644 index 0000000000000..a8793a114e913 --- /dev/null +++ b/docs/changelog/118617.yaml @@ -0,0 +1,5 @@ +pr: 118617 +summary: Add support for `sparse_vector` queries against `semantic_text` fields +area: "Search" +type: enhancement +issues: [] diff --git a/docs/changelog/118671.yaml b/docs/changelog/118671.yaml new file mode 100644 index 0000000000000..3931cc4179037 --- /dev/null +++ b/docs/changelog/118671.yaml @@ -0,0 +1,11 @@ +pr: 118671 +summary: Adjust `random_score` default field to `_seq_no` field +area: Search +type: breaking +issues: [] +breaking: + title: Adjust `random_score` default field to `_seq_no` field + area: Search + details: When providing a 'seed' parameter to a 'random_score' function in the 'function_score' query but NOT providing a 'field', the default 'field' is switched from '_id' to '_seq_no'. + impact: The random scoring and ordering may change when providing a 'seed' and not providing a 'field' to a 'random_score' function. + notable: false diff --git a/docs/changelog/118674.yaml b/docs/changelog/118674.yaml new file mode 100644 index 0000000000000..eeb90a3b38f66 --- /dev/null +++ b/docs/changelog/118674.yaml @@ -0,0 +1,5 @@ +pr: 118674 +summary: Ignore failures from renormalizing buckets in read-only index +area: Machine Learning +type: enhancement +issues: [] diff --git a/docs/changelog/118681.yaml b/docs/changelog/118681.yaml new file mode 100644 index 0000000000000..a186c05e6cd7d --- /dev/null +++ b/docs/changelog/118681.yaml @@ -0,0 +1,6 @@ +pr: 118681 +summary: '`ConnectTransportException` returns retryable BAD_GATEWAY' +area: Network +type: enhancement +issues: + - 118320 diff --git a/docs/changelog/118697.yaml b/docs/changelog/118697.yaml new file mode 100644 index 0000000000000..6e24e6ae4b47f --- /dev/null +++ b/docs/changelog/118697.yaml @@ -0,0 +1,6 @@ +pr: 118697 +summary: Esql implicit casting for date nanos +area: ES|QL +type: enhancement +issues: + - 118476 diff --git a/docs/changelog/118837.yaml b/docs/changelog/118837.yaml new file mode 100644 index 0000000000000..38cd32f3a3513 --- /dev/null +++ b/docs/changelog/118837.yaml @@ -0,0 +1,5 @@ +pr: 118837 +summary: Add missing timeouts to rest-api-spec ILM APIs +area: "ILM+SLM" +type: bug +issues: [] diff --git a/docs/changelog/118844.yaml b/docs/changelog/118844.yaml new file mode 100644 index 0000000000000..f9f92bcaeb8cb --- /dev/null +++ b/docs/changelog/118844.yaml @@ -0,0 +1,5 @@ +pr: 118844 +summary: Add missing timeouts to rest-api-spec ingest APIs +area: Ingest Node +type: bug +issues: [] diff --git a/docs/reference/alias.asciidoc b/docs/reference/alias.asciidoc index 9d784f530d63c..f676644c4ec48 100644 --- a/docs/reference/alias.asciidoc +++ b/docs/reference/alias.asciidoc @@ -407,3 +407,24 @@ POST _aliases } ---- // TEST[s/^/PUT my-index-2099.05.06-000001\n/] + +[discrete] +[[remove-index]] +=== Remove an index + +To remove an index, use the aliases API's `remove_index` action. 
+ +[source,console] +---- +POST _aliases +{ + "actions": [ + { + "remove_index": { + "index": "my-index-2099.05.06-000001" + } + } + ] +} +---- +// TEST[s/^/PUT my-index-2099.05.06-000001\n/] diff --git a/docs/reference/eql/eql-search-api.asciidoc b/docs/reference/eql/eql-search-api.asciidoc index d7f10f4627f6c..0fd490609277f 100644 --- a/docs/reference/eql/eql-search-api.asciidoc +++ b/docs/reference/eql/eql-search-api.asciidoc @@ -88,6 +88,53 @@ request that targets only `bar*` still returns an error. + Defaults to `true`. +`allow_partial_search_results`:: +(Optional, Boolean) + +If `false`, the request returns an error if one or more shards involved in the query are unavailable. ++ +If `true`, the query is executed only on the available shards, ignoring shard request timeouts and +<>. ++ +Defaults to `false`. ++ +To override the default for this field, set the +`xpack.eql.default_allow_partial_results` cluster setting to `true`. + + +[IMPORTANT] +==== +You can also specify this value using the `allow_partial_search_results` request body parameter. +If both parameters are specified, only the query parameter is used. +==== + + +`allow_partial_sequence_results`:: +(Optional, Boolean) + + +Used together with `allow_partial_search_results=true`, controls the behavior of sequence queries specifically +(if `allow_partial_search_results=false`, this setting has no effect). +If `true` and if some shards are unavailable, the sequences are calculated on available shards only. ++ +If `false` and if some shards are unavailable, the query only returns information about the shard failures, +but no further results. ++ +Defaults to `false`. ++ +Consider that sequences calculated with `allow_partial_search_results=true` can return incorrect results +(e.g. if a <> clause matches records in unavailable shards). ++ +To override the default for this field, set the +`xpack.eql.default_allow_partial_sequence_results` cluster setting to `true`. + + +[IMPORTANT] +==== +You can also specify this value using the `allow_partial_sequence_results` request body parameter. +If both parameters are specified, only the query parameter is used. +==== + `ccs_minimize_roundtrips`:: (Optional, Boolean) If `true`, network round-trips between the local and the remote cluster are minimized when running cross-cluster search (CCS) requests. diff --git a/docs/reference/esql/functions/description/hash.asciidoc b/docs/reference/esql/functions/description/hash.asciidoc new file mode 100644 index 0000000000000..e074915c5132a --- /dev/null +++ b/docs/reference/esql/functions/description/hash.asciidoc @@ -0,0 +1,5 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Description* + +Computes the hash of the input using various algorithms such as MD5, SHA, SHA-224, SHA-256, SHA-384, SHA-512. diff --git a/docs/reference/esql/functions/kibana/definition/hash.json b/docs/reference/esql/functions/kibana/definition/hash.json new file mode 100644 index 0000000000000..17a60cf45acfe --- /dev/null +++ b/docs/reference/esql/functions/kibana/definition/hash.json @@ -0,0 +1,82 @@ +{ + "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it.
See ../README.md for how to regenerate it.", + "type" : "eval", + "name" : "hash", + "description" : "Computes the hash of the input using various algorithms such as MD5, SHA, SHA-224, SHA-256, SHA-384, SHA-512.", + "signatures" : [ + { + "params" : [ + { + "name" : "algorithm", + "type" : "keyword", + "optional" : false, + "description" : "Hash algorithm to use." + }, + { + "name" : "input", + "type" : "keyword", + "optional" : false, + "description" : "Input to hash." + } + ], + "variadic" : false, + "returnType" : "keyword" + }, + { + "params" : [ + { + "name" : "algorithm", + "type" : "keyword", + "optional" : false, + "description" : "Hash algorithm to use." + }, + { + "name" : "input", + "type" : "text", + "optional" : false, + "description" : "Input to hash." + } + ], + "variadic" : false, + "returnType" : "keyword" + }, + { + "params" : [ + { + "name" : "algorithm", + "type" : "text", + "optional" : false, + "description" : "Hash algorithm to use." + }, + { + "name" : "input", + "type" : "keyword", + "optional" : false, + "description" : "Input to hash." + } + ], + "variadic" : false, + "returnType" : "keyword" + }, + { + "params" : [ + { + "name" : "algorithm", + "type" : "text", + "optional" : false, + "description" : "Hash algorithm to use." + }, + { + "name" : "input", + "type" : "text", + "optional" : false, + "description" : "Input to hash." + } + ], + "variadic" : false, + "returnType" : "keyword" + } + ], + "preview" : false, + "snapshot_only" : false +} diff --git a/docs/reference/esql/functions/kibana/docs/hash.md b/docs/reference/esql/functions/kibana/docs/hash.md new file mode 100644 index 0000000000000..9826e80ec5bec --- /dev/null +++ b/docs/reference/esql/functions/kibana/docs/hash.md @@ -0,0 +1,7 @@ + + +### HASH +Computes the hash of the input using various algorithms such as MD5, SHA, SHA-224, SHA-256, SHA-384, SHA-512. + diff --git a/docs/reference/esql/functions/layout/hash.asciidoc b/docs/reference/esql/functions/layout/hash.asciidoc new file mode 100644 index 0000000000000..27c55ada6319b --- /dev/null +++ b/docs/reference/esql/functions/layout/hash.asciidoc @@ -0,0 +1,14 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +[discrete] +[[esql-hash]] +=== `HASH` + +*Syntax* + +[.text-center] +image::esql/functions/signature/hash.svg[Embedded,opts=inline] + +include::../parameters/hash.asciidoc[] +include::../description/hash.asciidoc[] +include::../types/hash.asciidoc[] diff --git a/docs/reference/esql/functions/parameters/hash.asciidoc b/docs/reference/esql/functions/parameters/hash.asciidoc new file mode 100644 index 0000000000000..d47a82d4ab214 --- /dev/null +++ b/docs/reference/esql/functions/parameters/hash.asciidoc @@ -0,0 +1,9 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Parameters* + +`algorithm`:: +Hash algorithm to use. + +`input`:: +Input to hash. 
diff --git a/docs/reference/esql/functions/signature/hash.svg b/docs/reference/esql/functions/signature/hash.svg new file mode 100644 index 0000000000000..f819e14c9d1a4 --- /dev/null +++ b/docs/reference/esql/functions/signature/hash.svg @@ -0,0 +1 @@ +HASH(algorithm,input) \ No newline at end of file diff --git a/docs/reference/esql/functions/string-functions.asciidoc b/docs/reference/esql/functions/string-functions.asciidoc index ce9636f5c5a3a..da9580a55151a 100644 --- a/docs/reference/esql/functions/string-functions.asciidoc +++ b/docs/reference/esql/functions/string-functions.asciidoc @@ -13,6 +13,7 @@ * <> * <> * <> +* <> * <> * <> * <> @@ -37,6 +38,7 @@ include::layout/byte_length.asciidoc[] include::layout/concat.asciidoc[] include::layout/ends_with.asciidoc[] include::layout/from_base64.asciidoc[] +include::layout/hash.asciidoc[] include::layout/left.asciidoc[] include::layout/length.asciidoc[] include::layout/locate.asciidoc[] diff --git a/docs/reference/esql/functions/types/hash.asciidoc b/docs/reference/esql/functions/types/hash.asciidoc new file mode 100644 index 0000000000000..786ba03b2aa60 --- /dev/null +++ b/docs/reference/esql/functions/types/hash.asciidoc @@ -0,0 +1,12 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Supported types* + +[%header.monospaced.styled,format=dsv,separator=|] +|=== +algorithm | input | result +keyword | keyword | keyword +keyword | text | keyword +text | keyword | keyword +text | text | keyword +|=== diff --git a/docs/reference/ilm/example-index-lifecycle-policy.asciidoc b/docs/reference/ilm/example-index-lifecycle-policy.asciidoc index 6ec261fabc448..0b3c17fb2caae 100644 --- a/docs/reference/ilm/example-index-lifecycle-policy.asciidoc +++ b/docs/reference/ilm/example-index-lifecycle-policy.asciidoc @@ -24,7 +24,7 @@ and retention requirements. You want to send log files to an {es} cluster so you can visualize and analyze the data. This data has the following retention requirements: -* When the write index reaches 50GB or is 30 days old, roll over to a new index. +* When the primary shard size of the write index reaches 50GB or the index is 30 days old, roll over to a new index. * After rollover, keep indices in the hot data tier for 30 days. * 30 days after rollover: ** Move indices to the warm data tier. @@ -84,7 +84,7 @@ To save the `logs@lifecycle` policy as a new policy in {kib}: . On the **Edit policy logs** page, toggle **Save as new policy**, and then provide a new name for the policy, for example, `logs-custom`. The `logs@lifecycle` policy uses the recommended rollover defaults: Start writing to a new -index when the current write index reaches 50GB or becomes 30 days old. +index when the primary shard size of the current write index reaches 50GB or the index becomes 30 days old. To view or change the rollover settings, click **Advanced settings** for the hot phase. 
Then disable **Use recommended defaults** to display the rollover diff --git a/docs/reference/images/ilm/tutorial-ilm-hotphaserollover-default.png b/docs/reference/images/ilm/tutorial-ilm-hotphaserollover-default.png index 14ff66e410835..d7f314cedb261 100644 Binary files a/docs/reference/images/ilm/tutorial-ilm-hotphaserollover-default.png and b/docs/reference/images/ilm/tutorial-ilm-hotphaserollover-default.png differ diff --git a/docs/reference/indices/shard-stores.asciidoc b/docs/reference/indices/shard-stores.asciidoc index 1b001a3175b8c..04b086a758f9d 100644 --- a/docs/reference/indices/shard-stores.asciidoc +++ b/docs/reference/indices/shard-stores.asciidoc @@ -172,8 +172,9 @@ The API returns the following response: "attributes": {}, "roles": [...], "version": "8.10.0", - "min_index_version": 7000099, - "max_index_version": 8100099 + "min_index_version": 8000099, + "min_read_only_index_version": 7000099, + "max_index_version": 9004000 }, "allocation_id": "2iNySv_OQVePRX-yaRH_lQ", <4> "allocation" : "primary|replica|unused" <5> @@ -193,6 +194,7 @@ The API returns the following response: // TESTRESPONSE[s/"roles": \[[^]]*\]/"roles": $body.$_path/] // TESTRESPONSE[s/"8.10.0"/\$node_version/] // TESTRESPONSE[s/"min_index_version": 7000099/"min_index_version": $body.$_path/] +// TESTRESPONSE[s/"min_read_only_index_version": 7000099/"min_read_only_index_version": $body.$_path/] // TESTRESPONSE[s/"max_index_version": 8100099/"max_index_version": $body.$_path/] diff --git a/docs/reference/ml/trained-models/apis/start-trained-model-deployment.asciidoc b/docs/reference/ml/trained-models/apis/start-trained-model-deployment.asciidoc index 6f7e2a4d9f988..bf9c4d14db290 100644 --- a/docs/reference/ml/trained-models/apis/start-trained-model-deployment.asciidoc +++ b/docs/reference/ml/trained-models/apis/start-trained-model-deployment.asciidoc @@ -138,8 +138,8 @@ normal priority deployments. Controls how many inference requests are allowed in the queue at a time. Every machine learning node in the cluster where the model can be allocated has a queue of this size; when the number of requests exceeds the total value, -new requests are rejected with a 429 error. Defaults to 1024. Max allowed value -is 1000000. +new requests are rejected with a 429 error. Defaults to 10000. Max allowed value +is 100000. `threads_per_allocation`:: (Optional, integer) @@ -173,7 +173,7 @@ The API returns the following results: "model_bytes": 265632637, "threads_per_allocation" : 1, "number_of_allocations" : 1, - "queue_capacity" : 1024, + "queue_capacity" : 10000, "priority": "normal" }, "routing_table": { @@ -229,4 +229,4 @@ POST _ml/trained_models/my_model/deployment/_start?deployment_id=my_model_for_se } } -------------------------------------------------- -// TEST[skip:TBD] \ No newline at end of file +// TEST[skip:TBD] diff --git a/docs/reference/setup/install/docker.asciidoc b/docs/reference/setup/install/docker.asciidoc index 8694d7f5b46c6..86a0e567f6eec 100644 --- a/docs/reference/setup/install/docker.asciidoc +++ b/docs/reference/setup/install/docker.asciidoc @@ -55,6 +55,12 @@ docker pull {docker-image} // REVIEWED[DEC.10.24] -- +Alternatively, you can use the Wolfi-based image. Using Wolfi-based images requires Docker version 20.10.10 or later. +[source,sh,subs="attributes"] +---- +docker pull {docker-wolfi-image} +---- + . Optional: Install https://docs.sigstore.dev/cosign/system_config/installation/[Cosign] for your environment. Then use Cosign to verify the {es} image's signature.
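As a usage sketch (assuming the standard single-node run arguments used elsewhere in these docs; the container name, network, and memory limit are illustrative), the Wolfi-based image is started the same way as the default image, with only the image reference changed:

[source,sh,subs="attributes"]
----
docker run --name es01 --net elastic -p 9200:9200 -it -m 1GB {docker-wolfi-image}
----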
diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java index 85af5b120f6fd..c150f01153d35 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java @@ -43,7 +43,6 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.index.IndexVersion; @@ -80,7 +79,6 @@ import java.util.Collections; import java.util.List; import java.util.Objects; -import java.util.function.BiConsumer; import java.util.function.Supplier; import static org.elasticsearch.search.SearchService.ALLOW_EXPENSIVE_QUERIES; @@ -88,20 +86,12 @@ import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; public class PercolateQueryBuilder extends AbstractQueryBuilder<PercolateQueryBuilder> { - private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(ParseField.class); - static final String DOCUMENT_TYPE_DEPRECATION_MESSAGE = "[types removal] Types are deprecated in [percolate] queries. " + "The [document_type] should no longer be specified."; - static final String TYPE_DEPRECATION_MESSAGE = "[types removal] Types are deprecated in [percolate] queries. " + "The [type] of the indexed document should no longer be specified."; - public static final String NAME = "percolate"; static final ParseField DOCUMENT_FIELD = new ParseField("document"); static final ParseField DOCUMENTS_FIELD = new ParseField("documents"); private static final ParseField NAME_FIELD = new ParseField("name"); private static final ParseField QUERY_FIELD = new ParseField("field"); - private static final ParseField DOCUMENT_TYPE_FIELD = new ParseField("document_type"); - private static final ParseField INDEXED_DOCUMENT_FIELD_TYPE = new ParseField("type"); private static final ParseField INDEXED_DOCUMENT_FIELD_INDEX = new ParseField("index"); private static final ParseField INDEXED_DOCUMENT_FIELD_ID = new ParseField("id"); private static final ParseField INDEXED_DOCUMENT_FIELD_ROUTING = new ParseField("routing"); @@ -368,10 +358,6 @@ protected void doXContent(XContentBuilder builder, Params params) throws IOExcep ); } - private static BiConsumer<PercolateQueryBuilder, String> deprecateAndIgnoreType(String key, String message) { - return (target, type) -> deprecationLogger.compatibleCritical(key, message); - } - private static BytesReference parseDocument(XContentParser parser) throws IOException { try (XContentBuilder builder = XContentFactory.jsonBuilder()) { builder.copyCurrentStructure(parser); diff --git a/muted-tests.yml b/muted-tests.yml index da52d585267a5..2677b3e75d86e 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -87,9 +87,6 @@ tests: issue: https://github.com/elastic/elasticsearch/issues/115816 - class: org.elasticsearch.xpack.application.connector.ConnectorIndexServiceTests issue: https://github.com/elastic/elasticsearch/issues/116087 -- class: org.elasticsearch.backwards.MixedClusterClientYamlTestSuiteIT - method: test {p0=cat.shards/10_basic/Help} - issue: https://github.com/elastic/elasticsearch/issues/116110 - class:
org.elasticsearch.xpack.ml.integration.DatafeedJobsRestIT method: testLookbackWithIndicesOptions issue: https://github.com/elastic/elasticsearch/issues/116127 @@ -156,27 +153,6 @@ tests: issue: https://github.com/elastic/elasticsearch/issues/117473 - class: org.elasticsearch.repositories.s3.RepositoryS3EcsClientYamlTestSuiteIT issue: https://github.com/elastic/elasticsearch/issues/117525 -- class: org.elasticsearch.backwards.MixedClusterClientYamlTestSuiteIT - method: test {p0=synonyms/90_synonyms_reloading_for_synset/Reload analyzers for specific synonym set} - issue: https://github.com/elastic/elasticsearch/issues/116777 -- class: "org.elasticsearch.xpack.esql.qa.multi_node.EsqlSpecIT" - method: "test {scoring.*}" - issue: https://github.com/elastic/elasticsearch/issues/117641 -- class: "org.elasticsearch.xpack.esql.qa.single_node.EsqlSpecIT" - method: "test {scoring.*}" - issue: https://github.com/elastic/elasticsearch/issues/117641 -- class: "org.elasticsearch.xpack.esql.qa.mixed.MixedClusterEsqlSpecIT" - method: "test {scoring.*}" - issue: https://github.com/elastic/elasticsearch/issues/117641 -- class: "org.elasticsearch.xpack.esql.qa.mixed.MultiClusterEsqlSpecIT" - method: "test {scoring.*}" - issue: https://github.com/elastic/elasticsearch/issues/118460 -- class: "org.elasticsearch.xpack.esql.ccq.MultiClusterSpecIT" - method: "test {scoring.*}" - issue: https://github.com/elastic/elasticsearch/issues/118460 -- class: org.elasticsearch.xpack.esql.ccq.MultiClusterSpecIT - method: test {scoring.QstrWithFieldAndScoringSortedEval} - issue: https://github.com/elastic/elasticsearch/issues/117751 - class: org.elasticsearch.search.ccs.CrossClusterIT method: testCancel issue: https://github.com/elastic/elasticsearch/issues/108061 @@ -305,8 +281,15 @@ tests: - class: org.elasticsearch.xpack.security.QueryableReservedRolesIT method: testDeletingAndCreatingSecurityIndexTriggersSynchronization issue: https://github.com/elastic/elasticsearch/issues/118806 -- class: org.elasticsearch.xpack.esql.session.IndexResolverFieldNamesTests - issue: https://github.com/elastic/elasticsearch/issues/118814 +- class: org.elasticsearch.index.engine.RecoverySourcePruneMergePolicyTests + method: testPruneSome + issue: https://github.com/elastic/elasticsearch/issues/118728 +- class: org.elasticsearch.smoketest.DocsClientYamlTestSuiteIT + method: test {yaml=reference/indices/shard-stores/line_150} + issue: https://github.com/elastic/elasticsearch/issues/118896 +- class: org.elasticsearch.cluster.service.MasterServiceTests + method: testThreadContext + issue: https://github.com/elastic/elasticsearch/issues/118914 # Examples: # diff --git a/qa/mixed-cluster/build.gradle b/qa/mixed-cluster/build.gradle index d8f906b23d523..28bcac9f0242d 100644 --- a/qa/mixed-cluster/build.gradle +++ b/qa/mixed-cluster/build.gradle @@ -67,6 +67,9 @@ excludeList.add('indices.resolve_index/20_resolve_system_index/*') // Excluded because the error has changed excludeList.add('aggregations/percentiles_hdr_metric/Negative values test') +// sync_id is removed in 9.0 +excludeList.add("cat.shards/10_basic/Help") + def clusterPath = getPath() buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> diff --git a/renovate.json b/renovate.json index c1637ae651c1c..71c6301f8e0c2 100644 --- a/renovate.json +++ b/renovate.json @@ -7,8 +7,8 @@ "schedule": [ "after 1pm on tuesday" ], - "labels": [">non-issue", ":Delivery/Packaging", "Team:Delivery"], - "baseBranches": ["main", "8.x"], + "labels": [">non-issue", ":Delivery/Packaging", 
"Team:Delivery", "auto-merge-without-approval"], + "baseBranches": ["main", "8.x", "8.17", "8.16"], "packageRules": [ { "groupName": "wolfi (versioned)", diff --git a/rest-api-spec/build.gradle b/rest-api-spec/build.gradle index 7347d9c1312dd..bdee32e596c4c 100644 --- a/rest-api-spec/build.gradle +++ b/rest-api-spec/build.gradle @@ -69,4 +69,5 @@ tasks.named("yamlRestCompatTestTransform").configure ({ task -> task.skipTest("search/520_fetch_fields/fetch _seq_no via fields", "error code is changed from 5xx to 400 in 9.0") task.skipTest("search.vectors/41_knn_search_bbq_hnsw/Test knn search", "Scoring has changed in latest versions") task.skipTest("search.vectors/42_knn_search_bbq_flat/Test knn search", "Scoring has changed in latest versions") + task.skipTest("synonyms/90_synonyms_reloading_for_synset/Reload analyzers for specific synonym set", "Can't work until auto-expand replicas is 0-1 for synonyms index") }) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/eql.search.json b/rest-api-spec/src/main/resources/rest-api-spec/api/eql.search.json index c854c44d9d761..0f9af508f4c16 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/eql.search.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/eql.search.json @@ -41,6 +41,16 @@ "type": "time", "description": "Update the time interval in which the results (partial or final) for this search will be available", "default": "5d" + }, + "allow_partial_search_results": { + "type":"boolean", + "description":"Control whether the query should keep running in case of shard failures, and return partial results", + "default":false + }, + "allow_partial_sequence_results": { + "type":"boolean", + "description":"Control whether a sequence query should return partial results or no results at all in case of shard failures. 
This option has effect only if [allow_partial_search_results] is true.", + "default":false } }, "body":{ diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ilm.delete_lifecycle.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ilm.delete_lifecycle.json index 2ff1031ad5c52..cd6397fb61586 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/ilm.delete_lifecycle.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ilm.delete_lifecycle.json @@ -25,6 +25,15 @@ } ] }, - "params":{} + "params":{ + "master_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to master node" + }, + "timeout":{ + "type":"time", + "description":"Explicit operation timeout" + } + } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ilm.explain_lifecycle.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ilm.explain_lifecycle.json index c793ed09281ae..94c37adb802f6 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/ilm.explain_lifecycle.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ilm.explain_lifecycle.json @@ -33,6 +33,10 @@ "only_errors": { "type": "boolean", "description": "filters the indices included in the response to ones in an ILM error state, implies only_managed" + }, + "master_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to master node" } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ilm.get_lifecycle.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ilm.get_lifecycle.json index 17bf813093dd6..5abdfac7f5b30 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/ilm.get_lifecycle.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ilm.get_lifecycle.json @@ -31,6 +31,15 @@ } ] }, - "params":{} + "params":{ + "master_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to master node" + }, + "timeout":{ + "type":"time", + "description":"Explicit operation timeout" + } + } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ilm.put_lifecycle.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ilm.put_lifecycle.json index 5a12a778241b3..b7fdbe04a0ffb 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/ilm.put_lifecycle.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ilm.put_lifecycle.json @@ -26,7 +26,16 @@ } ] }, - "params":{}, + "params":{ + "master_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to master node" + }, + "timeout":{ + "type":"time", + "description":"Explicit operation timeout" + } + }, "body":{ "description":"The lifecycle policy definition to register" } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ilm.start.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ilm.start.json index 88b020071ab82..7141673ff9a9d 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/ilm.start.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ilm.start.json @@ -19,6 +19,15 @@ } ] }, - "params":{} + "params":{ + "master_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to master node" + }, + "timeout":{ + "type":"time", + "description":"Explicit operation timeout" + } + } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ilm.stop.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ilm.stop.json index 8401f93badfc4..962fa77263ee4 100644 --- 
a/rest-api-spec/src/main/resources/rest-api-spec/api/ilm.stop.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ilm.stop.json @@ -19,6 +19,15 @@ } ] }, - "params":{} + "params":{ + "master_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to master node" + }, + "timeout":{ + "type":"time", + "description":"Explicit operation timeout" + } + } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.delete_geoip_database.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.delete_geoip_database.json index fe50da720a4da..f76d328836d90 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.delete_geoip_database.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.delete_geoip_database.json @@ -26,6 +26,14 @@ ] }, "params":{ + "master_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to master node" + }, + "timeout":{ + "type":"time", + "description":"Explicit operation timeout" + } } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.delete_ip_location_database.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.delete_ip_location_database.json index e97d1da276906..341ff5081e270 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.delete_ip_location_database.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.delete_ip_location_database.json @@ -26,6 +26,14 @@ ] }, "params":{ + "master_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to master node" + }, + "timeout":{ + "type":"time", + "description":"Explicit operation timeout" + } } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.put_geoip_database.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.put_geoip_database.json index 6d088e3f164f4..9c2677d1f7b2f 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.put_geoip_database.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.put_geoip_database.json @@ -27,6 +27,14 @@ ] }, "params":{ + "master_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to master node" + }, + "timeout":{ + "type":"time", + "description":"Explicit operation timeout" + } }, "body":{ "description":"The database configuration definition", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.put_ip_location_database.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.put_ip_location_database.json index 18487969b1a90..782048b98160a 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.put_ip_location_database.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.put_ip_location_database.json @@ -27,6 +27,14 @@ ] }, "params":{ + "master_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to master node" + }, + "timeout":{ + "type":"time", + "description":"Explicit operation timeout" + } }, "body":{ "description":"The database configuration definition", diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/90_synonyms_reloading_for_synset.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/90_synonyms_reloading_for_synset.yml index d6c98673253fb..4e6bd83f07955 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/90_synonyms_reloading_for_synset.yml +++ 
b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/90_synonyms_reloading_for_synset.yml @@ -1,8 +1,8 @@ --- -"Reload analyzers for specific synonym set": +setup: - requires: cluster_features: ["gte_v8.10.0"] reason: Reloading analyzers for specific synonym set is introduced in 8.10.0 + # Create synonyms_set1 - do: synonyms.put_synonym: @@ -100,7 +100,12 @@ - '{"index": {"_index": "my_index2", "_id": "2"}}' - '{"my_field": "goodbye"}' - # An update of synonyms_set1 must trigger auto-reloading of analyzers only for synonyms_set1 +--- +"Reload analyzers for specific synonym set": +# These specific tests can't succeed in BwC, as synonyms auto-expand replicas are 0-all. Replicas can't be associated with +# upgraded nodes, and thus we are not able to guarantee that the shards are not failed. +# This test is skipped for BwC until synonyms index has auto-expand replicas set to 0-1. + - do: synonyms.put_synonym: id: synonyms_set1 @@ -108,13 +113,12 @@ synonyms_set: - synonyms: "hello, salute" - synonyms: "ciao => goodbye" + - match: { result: "updated" } - gt: { reload_analyzers_details._shards.total: 0 } - gt: { reload_analyzers_details._shards.successful: 0 } - match: { reload_analyzers_details._shards.failed: 0 } - - length: { reload_analyzers_details.reload_details: 1 } # reload details contain only a single index - - match: { reload_analyzers_details.reload_details.0.index: "my_index1" } - - match: { reload_analyzers_details.reload_details.0.reloaded_analyzers.0: "my_analyzer1" } + # Confirm that the index analyzers are reloaded for my_index1 - do: @@ -127,6 +131,23 @@ query: salute - match: { hits.total.value: 1 } +--- +"Check analyzer reloaded and non failed shards for bwc tests": + + - do: + synonyms.put_synonym: + id: synonyms_set1 + body: + synonyms_set: + - synonyms: "hello, salute" + - synonyms: "ciao => goodbye" + - match: { result: "updated" } + - gt: { reload_analyzers_details._shards.total: 0 } + - gt: { reload_analyzers_details._shards.successful: 0 } + - length: { reload_analyzers_details.reload_details: 1 } # reload details contain only a single index + - match: { reload_analyzers_details.reload_details.0.index: "my_index1" } + - match: { reload_analyzers_details.reload_details.0.reloaded_analyzers.0: "my_analyzer1" } + # Confirm that the index analyzers are still the same for my_index2 - do: search: diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index f5e581a81a37c..d3e235f1cd82a 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -138,6 +138,8 @@ static TransportVersion def(int id) { public static final TransportVersion KNN_QUERY_RESCORE_OVERSAMPLE = def(8_806_00_0); public static final TransportVersion SEMANTIC_QUERY_LENIENT = def(8_807_00_0); public static final TransportVersion ESQL_QUERY_BUILDER_IN_SEARCH_FUNCTIONS = def(8_808_00_0); + public static final TransportVersion EQL_ALLOW_PARTIAL_SEARCH_RESULTS = def(8_809_00_0); + public static final TransportVersion NODE_VERSION_INFORMATION_WITH_MIN_READ_ONLY_INDEX_VERSION = def(8_810_00_0); /* * STOP! READ THIS FIRST!
No, really, diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index 47c43eadcfb03..8873c9b0e281e 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -191,6 +191,7 @@ public class Version implements VersionId<Version>, ToXContentFragment { public static final Version V_8_16_0 = new Version(8_16_00_99); public static final Version V_8_16_1 = new Version(8_16_01_99); public static final Version V_8_16_2 = new Version(8_16_02_99); + public static final Version V_8_16_3 = new Version(8_16_03_99); public static final Version V_8_17_0 = new Version(8_17_00_99); public static final Version V_8_17_1 = new Version(8_17_01_99); public static final Version V_8_18_0 = new Version(8_18_00_99); diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterFeatures.java b/server/src/main/java/org/elasticsearch/cluster/ClusterFeatures.java index ad285cbd391cd..5b5a6577082d7 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterFeatures.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterFeatures.java @@ -9,11 +9,12 @@ package org.elasticsearch.cluster; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ChunkedToXContent; import org.elasticsearch.common.xcontent.ChunkedToXContentObject; -import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.features.NodeFeature; import org.elasticsearch.xcontent.ToXContent; @@ -79,28 +80,61 @@ public Map<String, Set<String>> nodeFeatures() { return nodeFeatures; } - /** - * The features in all nodes in the cluster. - * <p>
- * NOTE: This should not be used directly. - * Please use {@link org.elasticsearch.features.FeatureService#clusterHasFeature} instead. - */ - public Set<String> allNodeFeatures() { + private Set<String> allNodeFeatures() { if (allNodeFeatures == null) { allNodeFeatures = Set.copyOf(calculateAllNodeFeatures(nodeFeatures.values())); } return allNodeFeatures; } + /** + * Returns {@code true} if {@code node} can have assumed features. + * @see org.elasticsearch.env.BuildVersion#canRemoveAssumedFeatures + */ + public static boolean featuresCanBeAssumedForNode(DiscoveryNode node) { + return node.getBuildVersion().canRemoveAssumedFeatures(); + } + + /** + * Returns {@code true} if one or more nodes in {@code nodes} can have assumed features. + * @see org.elasticsearch.env.BuildVersion#canRemoveAssumedFeatures + */ + public static boolean featuresCanBeAssumedForNodes(DiscoveryNodes nodes) { + return nodes.getAllNodes().stream().anyMatch(n -> n.getBuildVersion().canRemoveAssumedFeatures()); + } + /** * {@code true} if {@code feature} is present on all nodes in the cluster. * <p>
* NOTE: This should not be used directly. * Please use {@link org.elasticsearch.features.FeatureService#clusterHasFeature} instead. */ - @SuppressForbidden(reason = "directly reading cluster features") - public boolean clusterHasFeature(NodeFeature feature) { - return allNodeFeatures().contains(feature.id()); + public boolean clusterHasFeature(DiscoveryNodes nodes, NodeFeature feature) { + assert nodes.getNodes().keySet().equals(nodeFeatures.keySet()) + : "Cluster features nodes " + nodeFeatures.keySet() + " is different to discovery nodes " + nodes.getNodes().keySet(); + + // basic case + boolean allNodesHaveFeature = allNodeFeatures().contains(feature.id()); + if (allNodesHaveFeature) { + return true; + } + + // if the feature is assumed, check the versions more closely + // it's actually ok if the feature is assumed, and all nodes missing the feature can assume it + // TODO: do we need some kind of transient cache of this calculation? + if (feature.assumedAfterNextCompatibilityBoundary()) { + for (var nf : nodeFeatures.entrySet()) { + if (nf.getValue().contains(feature.id()) == false + && featuresCanBeAssumedForNode(nodes.getNodes().get(nf.getKey())) == false) { + return false; + } + } + + // all nodes missing the feature can assume it - so that's alright then + return true; + } + + return false; } /** diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/NodeJoinExecutor.java b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeJoinExecutor.java index 5235293a54d95..74a8dc7851c89 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/NodeJoinExecutor.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeJoinExecutor.java @@ -29,6 +29,7 @@ import org.elasticsearch.common.Priority; import org.elasticsearch.common.Strings; import org.elasticsearch.features.FeatureService; +import org.elasticsearch.features.NodeFeature; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; import org.elasticsearch.persistent.PersistentTasksCustomMetadata; @@ -39,6 +40,7 @@ import java.util.Comparator; import java.util.HashMap; import java.util.HashSet; +import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Objects; @@ -137,8 +139,8 @@ public ClusterState execute(BatchExecutionContext<JoinTask> batchExecutionContext DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(newState.nodes()); Map<String, CompatibilityVersions> compatibilityVersionsMap = new HashMap<>(newState.compatibilityVersions()); - Map<String, Set<String>> nodeFeatures = new HashMap<>(newState.nodeFeatures()); - Set<String> allNodesFeatures = ClusterFeatures.calculateAllNodeFeatures(nodeFeatures.values()); + Map<String, Set<String>> nodeFeatures = new HashMap<>(newState.nodeFeatures()); // as present in cluster state + Set<String> effectiveClusterFeatures = calculateEffectiveClusterFeatures(newState.nodes(), nodeFeatures); assert nodesBuilder.isLocalNodeElectedMaster(); @@ -174,14 +176,17 @@ } blockForbiddenVersions(compatibilityVersions.transportVersion()); ensureNodesCompatibility(node.getVersion(), minClusterNodeVersion, maxClusterNodeVersion); - enforceNodeFeatureBarrier(node.getId(), allNodesFeatures, features); + Set<String> newNodeEffectiveFeatures = enforceNodeFeatureBarrier(node, effectiveClusterFeatures, features); // we do this validation quite late to prevent race conditions between nodes joining and importing dangling indices // we have to reject nodes that don't support all indices we have in this cluster
ensureIndexCompatibility(node.getMinIndexVersion(), node.getMaxIndexVersion(), initialState.getMetadata()); + nodesBuilder.add(node); compatibilityVersionsMap.put(node.getId(), compatibilityVersions); + // store the actual node features here, not including assumed features, as this is persisted in cluster state nodeFeatures.put(node.getId(), features); - allNodesFeatures.retainAll(features); + effectiveClusterFeatures.retainAll(newNodeEffectiveFeatures); + nodesChanged = true; minClusterNodeVersion = Version.min(minClusterNodeVersion, node.getVersion()); maxClusterNodeVersion = Version.max(maxClusterNodeVersion, node.getVersion()); @@ -355,6 +360,35 @@ private static void blockForbiddenVersions(TransportVersion joiningTransportVers } } + /** + * Calculate the cluster's effective features. This includes all features that are assumed on any nodes in the cluster, + * that are also present across the whole cluster as a result. + */ + private Set calculateEffectiveClusterFeatures(DiscoveryNodes nodes, Map> nodeFeatures) { + if (featureService.featuresCanBeAssumedForNodes(nodes)) { + Set assumedFeatures = featureService.getNodeFeatures() + .values() + .stream() + .filter(NodeFeature::assumedAfterNextCompatibilityBoundary) + .map(NodeFeature::id) + .collect(Collectors.toSet()); + + // add all assumed features to the featureset of all nodes of the next major version + nodeFeatures = new HashMap<>(nodeFeatures); + for (var node : nodes.getNodes().entrySet()) { + if (featureService.featuresCanBeAssumedForNode(node.getValue())) { + assert nodeFeatures.containsKey(node.getKey()) : "Node " + node.getKey() + " does not have any features"; + nodeFeatures.computeIfPresent(node.getKey(), (k, v) -> { + var newFeatures = new HashSet<>(v); + return newFeatures.addAll(assumedFeatures) ? newFeatures : v; + }); + } + } + } + + return ClusterFeatures.calculateAllNodeFeatures(nodeFeatures.values()); + } + /** * Ensures that all indices are compatible with the given index version. 
This will ensure that all indices in the given metadata * will not be created with a newer version of elasticsearch as well as that all indices are newer or equal to the minimum index @@ -461,13 +495,44 @@ public static void ensureVersionBarrier(Version joiningNodeVersion, Version minC } } - private void enforceNodeFeatureBarrier(String nodeId, Set existingNodesFeatures, Set newNodeFeatures) { + /** + * Enforces the feature join barrier - a joining node should have all features already present in all existing nodes in the cluster + * + * @return The set of features that this node has (including assumed features) + */ + private Set enforceNodeFeatureBarrier(DiscoveryNode node, Set effectiveClusterFeatures, Set newNodeFeatures) { // prevent join if it does not have one or more features that all other nodes have - Set missingFeatures = new HashSet<>(existingNodesFeatures); + Set missingFeatures = new HashSet<>(effectiveClusterFeatures); missingFeatures.removeAll(newNodeFeatures); - if (missingFeatures.isEmpty() == false) { - throw new IllegalStateException("Node " + nodeId + " is missing required features " + missingFeatures); + if (missingFeatures.isEmpty()) { + // nothing missing - all ok + return newNodeFeatures; + } + + if (featureService.featuresCanBeAssumedForNode(node)) { + // it might still be ok for this node to join if this node can have assumed features, + // and all the missing features are assumed + // we can get the NodeFeature object directly from this node's registered features + // as all existing nodes in the cluster have the features present in effectiveClusterFeatures, including this one + newNodeFeatures = new HashSet<>(newNodeFeatures); + for (Iterator it = missingFeatures.iterator(); it.hasNext();) { + String feature = it.next(); + NodeFeature nf = featureService.getNodeFeatures().get(feature); + if (nf.assumedAfterNextCompatibilityBoundary()) { + // it's ok for this feature to be missing from this node + it.remove(); + // and it should be assumed to still be in the cluster + newNodeFeatures.add(feature); + } + // even if we don't remove it, still continue, so the exception message below is accurate + } + } + + if (missingFeatures.isEmpty()) { + return newNodeFeatures; + } else { + throw new IllegalStateException("Node " + node.getId() + " is missing required features " + missingFeatures); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java b/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java index 7bf367f99b929..7c757e7657853 100644 --- a/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java +++ b/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java @@ -37,6 +37,7 @@ import java.util.SortedSet; import java.util.TreeSet; +import static org.elasticsearch.TransportVersions.NODE_VERSION_INFORMATION_WITH_MIN_READ_ONLY_INDEX_VERSION; import static org.elasticsearch.node.NodeRoleSettings.NODE_ROLES_SETTING; /** @@ -325,7 +326,17 @@ public DiscoveryNode(StreamInput in) throws IOException { } } this.roles = Collections.unmodifiableSortedSet(roles); - versionInfo = new VersionInformation(Version.readVersion(in), IndexVersion.readVersion(in), IndexVersion.readVersion(in)); + Version version = Version.readVersion(in); + IndexVersion minIndexVersion = IndexVersion.readVersion(in); + IndexVersion minReadOnlyIndexVersion; + if (in.getTransportVersion().onOrAfter(NODE_VERSION_INFORMATION_WITH_MIN_READ_ONLY_INDEX_VERSION)) { + minReadOnlyIndexVersion = IndexVersion.readVersion(in); + } else { +
minReadOnlyIndexVersion = minIndexVersion; + + } + IndexVersion maxIndexVersion = IndexVersion.readVersion(in); + versionInfo = new VersionInformation(version, minIndexVersion, minReadOnlyIndexVersion, maxIndexVersion); if (in.getTransportVersion().onOrAfter(EXTERNAL_ID_VERSION)) { this.externalId = readStringLiteral.read(in); } else { @@ -360,6 +371,9 @@ public void writeTo(StreamOutput out) throws IOException { }); Version.writeVersion(versionInfo.nodeVersion(), out); IndexVersion.writeVersion(versionInfo.minIndexVersion(), out); + if (out.getTransportVersion().onOrAfter(NODE_VERSION_INFORMATION_WITH_MIN_READ_ONLY_INDEX_VERSION)) { + IndexVersion.writeVersion(versionInfo.minReadOnlyIndexVersion(), out); + } IndexVersion.writeVersion(versionInfo.maxIndexVersion(), out); if (out.getTransportVersion().onOrAfter(EXTERNAL_ID_VERSION)) { out.writeString(externalId); @@ -478,6 +492,10 @@ public IndexVersion getMinIndexVersion() { return versionInfo.minIndexVersion(); } + public IndexVersion getMinReadOnlyIndexVersion() { + return versionInfo.minReadOnlyIndexVersion(); + } + public IndexVersion getMaxIndexVersion() { return versionInfo.maxIndexVersion(); } @@ -577,6 +595,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.endArray(); builder.field("version", versionInfo.buildVersion().toString()); builder.field("min_index_version", versionInfo.minIndexVersion()); + builder.field("min_read_only_index_version", versionInfo.minReadOnlyIndexVersion()); builder.field("max_index_version", versionInfo.maxIndexVersion()); builder.endObject(); return builder; diff --git a/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java b/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java index 5e6dec7b68062..f733ab223fdd1 100644 --- a/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java +++ b/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java @@ -69,6 +69,7 @@ public class DiscoveryNodes implements Iterable, SimpleDiffable> tiersToNodeIds; @@ -84,6 +85,7 @@ private DiscoveryNodes( Version minNodeVersion, IndexVersion maxDataNodeCompatibleIndexVersion, IndexVersion minSupportedIndexVersion, + IndexVersion minReadOnlySupportedIndexVersion, Map> tiersToNodeIds ) { this.nodeLeftGeneration = nodeLeftGeneration; @@ -100,6 +102,8 @@ private DiscoveryNodes( this.maxNodeVersion = maxNodeVersion; this.maxDataNodeCompatibleIndexVersion = maxDataNodeCompatibleIndexVersion; this.minSupportedIndexVersion = minSupportedIndexVersion; + this.minReadOnlySupportedIndexVersion = minReadOnlySupportedIndexVersion; + assert minReadOnlySupportedIndexVersion.onOrBefore(minSupportedIndexVersion); assert (localNodeId == null) == (localNode == null); this.tiersToNodeIds = tiersToNodeIds; } @@ -118,6 +122,7 @@ public DiscoveryNodes withMasterNodeId(@Nullable String masterNodeId) { minNodeVersion, maxDataNodeCompatibleIndexVersion, minSupportedIndexVersion, + minReadOnlySupportedIndexVersion, tiersToNodeIds ); } @@ -374,6 +379,13 @@ public IndexVersion getMinSupportedIndexVersion() { return minSupportedIndexVersion; } + /** + * Returns the minimum index version for read-only indices supported by all nodes in the cluster + */ + public IndexVersion getMinReadOnlySupportedIndexVersion() { + return minReadOnlySupportedIndexVersion; + } + /** * Return the node-left generation, which is the number of times the cluster membership has been updated by removing one or more nodes. *

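
Two details of the read-only floor plumbing above are worth spelling out. On the wire, a node on an older transport version never sends a minReadOnlyIndexVersion, and the reader substitutes that node's minIndexVersion; given the invariant asserted in DiscoveryNodes (the read-only floor is always onOrBefore the regular floor), that is the most conservative value that is still correct. In the builder hunk that follows, the per-node minimums are folded with max(), so the strictest node constrains the whole cluster, and an empty node set falls back to IndexVersions.MINIMUM_READONLY_COMPATIBLE. A sketch of that fold, with an illustrative helper name:

    import org.elasticsearch.index.IndexVersion;

    // Illustrative fold: the cluster-wide floor for read-only indices is the newest
    // (most restrictive) of the per-node minimums, mirroring minSupportedIndexVersion.
    static IndexVersion foldReadOnlyFloor(IndexVersion acc, IndexVersion nodeMin) {
        if (acc == null) {
            return nodeMin; // first node seen
        }
        return acc.onOrBefore(nodeMin) ? nodeMin : acc; // i.e. max(acc, nodeMin)
    }
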
@@ -840,6 +852,7 @@ public DiscoveryNodes build() { Version maxNodeVersion = null; IndexVersion maxDataNodeCompatibleIndexVersion = null; IndexVersion minSupportedIndexVersion = null; + IndexVersion minReadOnlySupportedIndexVersion = null; for (Map.Entry nodeEntry : nodes.entrySet()) { DiscoveryNode discoNode = nodeEntry.getValue(); Version version = discoNode.getVersion(); @@ -849,6 +862,7 @@ public DiscoveryNodes build() { minNodeVersion = min(minNodeVersion, version); maxNodeVersion = max(maxNodeVersion, version); minSupportedIndexVersion = max(minSupportedIndexVersion, discoNode.getMinIndexVersion()); + minReadOnlySupportedIndexVersion = max(minReadOnlySupportedIndexVersion, discoNode.getMinReadOnlyIndexVersion()); } final long newNodeLeftGeneration; @@ -881,6 +895,7 @@ public DiscoveryNodes build() { Objects.requireNonNullElse(minNodeVersion, Version.CURRENT.minimumCompatibilityVersion()), Objects.requireNonNullElse(maxDataNodeCompatibleIndexVersion, IndexVersion.current()), Objects.requireNonNullElse(minSupportedIndexVersion, IndexVersions.MINIMUM_COMPATIBLE), + Objects.requireNonNullElse(minReadOnlySupportedIndexVersion, IndexVersions.MINIMUM_READONLY_COMPATIBLE), computeTiersToNodesMap(dataNodes) ); } diff --git a/server/src/main/java/org/elasticsearch/cluster/node/VersionInformation.java b/server/src/main/java/org/elasticsearch/cluster/node/VersionInformation.java index a4d0ff1eb55e4..852f31db69c92 100644 --- a/server/src/main/java/org/elasticsearch/cluster/node/VersionInformation.java +++ b/server/src/main/java/org/elasticsearch/cluster/node/VersionInformation.java @@ -18,20 +18,23 @@ /** * Represents the versions of various aspects of an Elasticsearch node. - * @param buildVersion The node {@link BuildVersion} - * @param minIndexVersion The minimum {@link IndexVersion} supported by this node - * @param maxIndexVersion The maximum {@link IndexVersion} supported by this node + * @param buildVersion The node {@link BuildVersion} + * @param minIndexVersion The minimum {@link IndexVersion} supported by this node + * @param minReadOnlyIndexVersion The minimum {@link IndexVersion} for read-only indices supported by this node + * @param maxIndexVersion The maximum {@link IndexVersion} supported by this node */ public record VersionInformation( BuildVersion buildVersion, Version nodeVersion, IndexVersion minIndexVersion, + IndexVersion minReadOnlyIndexVersion, IndexVersion maxIndexVersion ) { public static final VersionInformation CURRENT = new VersionInformation( BuildVersion.current(), IndexVersions.MINIMUM_COMPATIBLE, + IndexVersions.MINIMUM_READONLY_COMPATIBLE, IndexVersion.current() ); @@ -39,11 +42,18 @@ public record VersionInformation( Objects.requireNonNull(buildVersion); Objects.requireNonNull(nodeVersion); Objects.requireNonNull(minIndexVersion); + Objects.requireNonNull(minReadOnlyIndexVersion); Objects.requireNonNull(maxIndexVersion); + assert minReadOnlyIndexVersion.onOrBefore(minIndexVersion) : minReadOnlyIndexVersion + " > " + minIndexVersion; } - public VersionInformation(BuildVersion version, IndexVersion minIndexVersion, IndexVersion maxIndexVersion) { - this(version, Version.CURRENT, minIndexVersion, maxIndexVersion); + public VersionInformation( + BuildVersion version, + IndexVersion minIndexVersion, + IndexVersion minReadOnlyIndexVersion, + IndexVersion maxIndexVersion + ) { + this(version, Version.CURRENT, minIndexVersion, minReadOnlyIndexVersion, maxIndexVersion); /* * Whilst DiscoveryNode.getVersion exists, we need to be able to get a Version from VersionInfo * 
This needs to be consistent - on serverless, BuildVersion has an id of -1, which translates @@ -57,7 +67,17 @@ public VersionInformation(BuildVersion version, IndexVersion minIndexVersion, In @Deprecated public VersionInformation(Version version, IndexVersion minIndexVersion, IndexVersion maxIndexVersion) { - this(BuildVersion.fromVersionId(version.id()), version, minIndexVersion, maxIndexVersion); + this(version, minIndexVersion, minIndexVersion, maxIndexVersion); + } + + @Deprecated + public VersionInformation( + Version version, + IndexVersion minIndexVersion, + IndexVersion minReadOnlyIndexVersion, + IndexVersion maxIndexVersion + ) { + this(BuildVersion.fromVersionId(version.id()), version, minIndexVersion, minReadOnlyIndexVersion, maxIndexVersion); } @Deprecated diff --git a/server/src/main/java/org/elasticsearch/discovery/HandshakingTransportAddressConnector.java b/server/src/main/java/org/elasticsearch/discovery/HandshakingTransportAddressConnector.java index ce849c26ab780..98715127351aa 100644 --- a/server/src/main/java/org/elasticsearch/discovery/HandshakingTransportAddressConnector.java +++ b/server/src/main/java/org/elasticsearch/discovery/HandshakingTransportAddressConnector.java @@ -110,6 +110,7 @@ private void openProbeConnection(ActionListener listener) new VersionInformation( Version.CURRENT.minimumCompatibilityVersion(), IndexVersions.MINIMUM_COMPATIBLE, + IndexVersions.MINIMUM_READONLY_COMPATIBLE, IndexVersion.current() ) ), diff --git a/server/src/main/java/org/elasticsearch/env/BuildVersion.java b/server/src/main/java/org/elasticsearch/env/BuildVersion.java index 7a6b27eab2330..5c3602283fef3 100644 --- a/server/src/main/java/org/elasticsearch/env/BuildVersion.java +++ b/server/src/main/java/org/elasticsearch/env/BuildVersion.java @@ -37,6 +37,12 @@ */ public abstract class BuildVersion implements ToXContentFragment, Writeable { + /** + * Checks if this version can operate properly in a cluster without features + * that are assumed in the currently running Elasticsearch. + */ + public abstract boolean canRemoveAssumedFeatures(); + /** * Check whether this version is on or after a minimum threshold. * diff --git a/server/src/main/java/org/elasticsearch/env/DefaultBuildVersion.java b/server/src/main/java/org/elasticsearch/env/DefaultBuildVersion.java index a7e1a4fee341d..70aa3f6639a4d 100644 --- a/server/src/main/java/org/elasticsearch/env/DefaultBuildVersion.java +++ b/server/src/main/java/org/elasticsearch/env/DefaultBuildVersion.java @@ -47,6 +47,17 @@ final class DefaultBuildVersion extends BuildVersion { this(in.readVInt()); } + @Override + public boolean canRemoveAssumedFeatures() { + /* + * We can remove assumed features if the node version is the next major version. + * This is because the next major version can only form a cluster with the + * latest minor version of the previous major, so any features introduced before that point + * (that are marked as assumed in the running code version) are automatically met by that version. 
+ */ + return version.major == Version.CURRENT.major + 1; + } + @Override public boolean onOrAfterMinimumCompatible() { return Version.CURRENT.minimumCompatibilityVersion().onOrBefore(version); diff --git a/server/src/main/java/org/elasticsearch/features/FeatureService.java b/server/src/main/java/org/elasticsearch/features/FeatureService.java index 9a0ac7cafc183..c04fbae05ee2c 100644 --- a/server/src/main/java/org/elasticsearch/features/FeatureService.java +++ b/server/src/main/java/org/elasticsearch/features/FeatureService.java @@ -9,7 +9,10 @@ package org.elasticsearch.features; +import org.elasticsearch.cluster.ClusterFeatures; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.logging.LogManager; import org.elasticsearch.logging.Logger; @@ -38,9 +41,7 @@ public class FeatureService { * as the local node's supported feature set */ public FeatureService(List specs) { - - var featureData = FeatureData.createFromSpecifications(specs); - nodeFeatures = featureData.getNodeFeatures(); + this.nodeFeatures = FeatureData.createFromSpecifications(specs).getNodeFeatures(); logger.info("Registered local node features {}", nodeFeatures.keySet().stream().sorted().toList()); } @@ -53,11 +54,25 @@ public Map getNodeFeatures() { return nodeFeatures; } + /** + * Returns {@code true} if {@code node} can have assumed features. + */ + public boolean featuresCanBeAssumedForNode(DiscoveryNode node) { + return ClusterFeatures.featuresCanBeAssumedForNode(node); + } + + /** + * Returns {@code true} if one or more nodes in {@code nodes} can have assumed features. + */ + public boolean featuresCanBeAssumedForNodes(DiscoveryNodes nodes) { + return ClusterFeatures.featuresCanBeAssumedForNodes(nodes); + } + /** * Returns {@code true} if all nodes in {@code state} support feature {@code feature}. */ @SuppressForbidden(reason = "We need basic feature information from cluster state") public boolean clusterHasFeature(ClusterState state, NodeFeature feature) { - return state.clusterFeatures().clusterHasFeature(feature); + return state.clusterFeatures().clusterHasFeature(state.nodes(), feature); } } diff --git a/server/src/main/java/org/elasticsearch/features/NodeFeature.java b/server/src/main/java/org/elasticsearch/features/NodeFeature.java index 957308e805562..961b386d62802 100644 --- a/server/src/main/java/org/elasticsearch/features/NodeFeature.java +++ b/server/src/main/java/org/elasticsearch/features/NodeFeature.java @@ -15,10 +15,17 @@ * A feature published by a node. * * @param id The feature id. Must be unique in the node. + * @param assumedAfterNextCompatibilityBoundary + * {@code true} if this feature is removed at the next compatibility boundary (ie next major version), + * and so should be assumed to be true for all nodes after that boundary. 
*/ -public record NodeFeature(String id) { +public record NodeFeature(String id, boolean assumedAfterNextCompatibilityBoundary) { public NodeFeature { Objects.requireNonNull(id); } + + public NodeFeature(String id) { + this(id, false); + } } diff --git a/server/src/main/java/org/elasticsearch/index/query/functionscore/RandomScoreFunctionBuilder.java b/server/src/main/java/org/elasticsearch/index/query/functionscore/RandomScoreFunctionBuilder.java index 6d4b2dd4ab1f5..88f1ab1ba5c2e 100644 --- a/server/src/main/java/org/elasticsearch/index/query/functionscore/RandomScoreFunctionBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/functionscore/RandomScoreFunctionBuilder.java @@ -13,12 +13,10 @@ import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.logging.DeprecationCategory; -import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.lucene.search.function.RandomScoreFunction; import org.elasticsearch.common.lucene.search.function.ScoreFunction; -import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.mapper.SeqNoFieldMapper; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; @@ -30,7 +28,6 @@ * A function that computes a random score for the matched documents */ public class RandomScoreFunctionBuilder extends ScoreFunctionBuilder { - private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RandomScoreFunctionBuilder.class); public static final String NAME = "random_score"; private String field; @@ -140,17 +137,7 @@ protected ScoreFunction doToFunction(SearchExecutionContext context) { // DocID-based random score generation return new RandomScoreFunction(hash(context.nowInMillis()), salt, null); } else { - String fieldName; - if (field == null) { - deprecationLogger.warn( - DeprecationCategory.QUERIES, - "seed_requires_field", - "As of version 7.0 Elasticsearch will require that a [field] parameter is provided when a [seed] is set" - ); - fieldName = IdFieldMapper.NAME; - } else { - fieldName = field; - } + final String fieldName = Objects.requireNonNullElse(field, SeqNoFieldMapper.NAME); if (context.isFieldMapped(fieldName) == false) { if (context.hasMappings() == false) { // no mappings: the index is empty anyway diff --git a/server/src/main/java/org/elasticsearch/readiness/ReadinessService.java b/server/src/main/java/org/elasticsearch/readiness/ReadinessService.java index 15b9eacfa2118..de56ead9b5aba 100644 --- a/server/src/main/java/org/elasticsearch/readiness/ReadinessService.java +++ b/server/src/main/java/org/elasticsearch/readiness/ReadinessService.java @@ -294,8 +294,8 @@ protected boolean areFileSettingsApplied(ClusterState clusterState) { } @SuppressForbidden(reason = "need to check file settings support on exact cluster state") - private static boolean supportsFileSettings(ClusterState clusterState) { - return clusterState.clusterFeatures().clusterHasFeature(FileSettingsFeatures.FILE_SETTINGS_SUPPORTED); + private boolean supportsFileSettings(ClusterState clusterState) { + return clusterState.clusterFeatures().clusterHasFeature(clusterState.nodes(), FileSettingsFeatures.FILE_SETTINGS_SUPPORTED); } private void setReady(boolean ready) { diff --git 
a/server/src/main/java/org/elasticsearch/rest/RestUtils.java b/server/src/main/java/org/elasticsearch/rest/RestUtils.java index df51b57f0859c..bbca086e345f7 100644 --- a/server/src/main/java/org/elasticsearch/rest/RestUtils.java +++ b/server/src/main/java/org/elasticsearch/rest/RestUtils.java @@ -15,6 +15,7 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; +import java.net.URI; import java.nio.charset.Charset; import java.nio.charset.StandardCharsets; import java.util.Arrays; @@ -35,6 +36,13 @@ public class RestUtils { public static final UnaryOperator REST_DECODER = RestUtils::decodeComponent; + public static void decodeQueryString(URI uri, Map params) { + final var rawQuery = uri.getRawQuery(); + if (Strings.hasLength(rawQuery)) { + decodeQueryString(rawQuery, 0, params); + } + } + public static void decodeQueryString(String s, int fromIndex, Map params) { if (fromIndex < 0) { return; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java index a8ccd1c76d031..4d0f58756b11c 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java @@ -11,7 +11,6 @@ import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.common.Rounding; -import org.elasticsearch.common.logging.DeprecationCategory; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.aggregations.Aggregator; @@ -42,49 +41,6 @@ public static void registerAggregators(ValuesSourceRegistry.Builder builder) { ); builder.register(DateHistogramAggregationBuilder.REGISTRY_KEY, CoreValuesSourceType.RANGE, DateRangeHistogramAggregator::new, true); - - builder.register( - DateHistogramAggregationBuilder.REGISTRY_KEY, - CoreValuesSourceType.BOOLEAN, - ( - name, - factories, - rounding, - order, - keyed, - minDocCount, - downsampledResultsOffset, - extendedBounds, - hardBounds, - valuesSourceConfig, - context, - parent, - cardinality, - metadata) -> { - DEPRECATION_LOGGER.warn( - DeprecationCategory.AGGREGATIONS, - "date-histogram-boolean", - "Running DateHistogram aggregations on [boolean] fields is deprecated" - ); - return DateHistogramAggregator.build( - name, - factories, - rounding, - order, - keyed, - minDocCount, - downsampledResultsOffset, - extendedBounds, - hardBounds, - valuesSourceConfig, - context, - parent, - cardinality, - metadata - ); - }, - true - ); } private final DateHistogramAggregationSupplier aggregatorSupplier; diff --git a/server/src/main/java/org/elasticsearch/transport/ConnectTransportException.java b/server/src/main/java/org/elasticsearch/transport/ConnectTransportException.java index 648d27c885843..302175cc4f5a0 100644 --- a/server/src/main/java/org/elasticsearch/transport/ConnectTransportException.java +++ b/server/src/main/java/org/elasticsearch/transport/ConnectTransportException.java @@ -13,6 +13,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.rest.RestStatus; import java.io.IOException; @@ -41,6 +42,18 @@ public ConnectTransportException(StreamInput in) throws IOException { } 
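
Circling back to the RestUtils addition above: URI#getQuery returns an already percent-decoded string, so the old call-site pattern of decodeQueryString(uri.getQuery(), 0, params) needed a null check and could decode escapes twice; the new overload reads the raw query and treats a missing query as a no-op. The GCS fixture call sites further below switch over to it. A small usage sketch (paths and parameter values are made up):

    import org.elasticsearch.rest.RestUtils;

    import java.net.URI;
    import java.util.HashMap;
    import java.util.Map;

    public class DecodeQueryDemo {
        public static void main(String[] args) {
            Map<String, String> params = new HashMap<>();
            RestUtils.decodeQueryString(URI.create("/storage/v1/b/bucket/o?prefix=foo%2Fbar&delimiter=%2F"), params);
            // params now contains prefix=foo/bar and delimiter=/
            RestUtils.decodeQueryString(URI.create("/storage/v1/b/bucket/o"), params);
            // no query string: params are left unchanged
        }
    }
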
} + /** + * The ES REST API is a gateway to a single or multiple clusters. If there is an error connecting to other servers, then we should + * return a 502 BAD_GATEWAY status code instead of the parent class' 500 INTERNAL_SERVER_ERROR. Clients tend to retry on a 502 but not + * on a 500, and retrying may help on a connection error. + * + * @return a {@link RestStatus#BAD_GATEWAY} code + */ + @Override + public final RestStatus status() { + return RestStatus.BAD_GATEWAY; + } + @Override protected void writeTo(StreamOutput out, Writer nestedExceptionsWriter) throws IOException { super.writeTo(out, nestedExceptionsWriter); diff --git a/server/src/main/java/org/elasticsearch/transport/ProxyConnectionStrategy.java b/server/src/main/java/org/elasticsearch/transport/ProxyConnectionStrategy.java index d5047a61e4606..eb2eab75d3fe3 100644 --- a/server/src/main/java/org/elasticsearch/transport/ProxyConnectionStrategy.java +++ b/server/src/main/java/org/elasticsearch/transport/ProxyConnectionStrategy.java @@ -303,6 +303,7 @@ public void onFailure(Exception e) { new VersionInformation( Version.CURRENT.minimumCompatibilityVersion(), IndexVersions.MINIMUM_COMPATIBLE, + IndexVersions.MINIMUM_READONLY_COMPATIBLE, IndexVersion.current() ) ); diff --git a/server/src/main/java/org/elasticsearch/transport/SniffConnectionStrategy.java b/server/src/main/java/org/elasticsearch/transport/SniffConnectionStrategy.java index 2c198caf22354..854072c49e354 100644 --- a/server/src/main/java/org/elasticsearch/transport/SniffConnectionStrategy.java +++ b/server/src/main/java/org/elasticsearch/transport/SniffConnectionStrategy.java @@ -505,6 +505,7 @@ private static DiscoveryNode resolveSeedNode(String clusterAlias, String address var seedVersion = new VersionInformation( Version.CURRENT.minimumCompatibilityVersion(), IndexVersions.MINIMUM_COMPATIBLE, + IndexVersions.MINIMUM_READONLY_COMPATIBLE, IndexVersion.current() ); if (proxyAddress == null || proxyAddress.isEmpty()) { diff --git a/server/src/main/resources/org/elasticsearch/TransportVersions.csv b/server/src/main/resources/org/elasticsearch/TransportVersions.csv index 08db0822dfef5..2016f59b58a3e 100644 --- a/server/src/main/resources/org/elasticsearch/TransportVersions.csv +++ b/server/src/main/resources/org/elasticsearch/TransportVersions.csv @@ -135,4 +135,5 @@ 8.15.5,8702003 8.16.0,8772001 8.16.1,8772004 +8.16.2,8772004 8.17.0,8797002 diff --git a/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv b/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv index afe696f31d323..3bfeeded6494c 100644 --- a/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv +++ b/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv @@ -135,4 +135,5 @@ 8.15.5,8512000 8.16.0,8518000 8.16.1,8518000 +8.16.2,8518000 8.17.0,8521000 diff --git a/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java b/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java index 2abe4157583cd..31f54f9a16359 100644 --- a/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java +++ b/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java @@ -409,6 +409,7 @@ public void testConnectTransportException() throws IOException { ex = serialize(new ConnectTransportException(node, "msg", "action", new NullPointerException())); assertEquals("[][" + transportAddress + "][action] msg", ex.getMessage()); assertThat(ex.getCause(), instanceOf(NullPointerException.class)); + 
assertEquals(RestStatus.BAD_GATEWAY, ex.status()); } public void testSearchPhaseExecutionException() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponseTests.java index b59cc13a20ff2..69cff0fc45ac3 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponseTests.java @@ -127,6 +127,7 @@ public void testToXContentWithDeprecatedClusterState() { ], "version": "%s", "min_index_version": %s, + "min_read_only_index_version": %s, "max_index_version": %s } }, @@ -218,6 +219,7 @@ public void testToXContentWithDeprecatedClusterState() { clusterState.getNodes().get("node0").getEphemeralId(), Version.CURRENT, IndexVersions.MINIMUM_COMPATIBLE, + IndexVersions.MINIMUM_READONLY_COMPATIBLE, IndexVersion.current(), IndexVersion.current(), IndexVersion.current() diff --git a/server/src/test/java/org/elasticsearch/action/datastreams/autosharding/DataStreamAutoShardingServiceTests.java b/server/src/test/java/org/elasticsearch/action/datastreams/autosharding/DataStreamAutoShardingServiceTests.java index 2c6e273bb6e23..ba0f04d174f43 100644 --- a/server/src/test/java/org/elasticsearch/action/datastreams/autosharding/DataStreamAutoShardingServiceTests.java +++ b/server/src/test/java/org/elasticsearch/action/datastreams/autosharding/DataStreamAutoShardingServiceTests.java @@ -19,6 +19,8 @@ import org.elasticsearch.cluster.metadata.IndexMetadataStats; import org.elasticsearch.cluster.metadata.IndexWriteLoad; import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.node.DiscoveryNodeUtils; +import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; @@ -110,6 +112,7 @@ public void testCalculateValidations() { ); builder.put(dataStream); ClusterState state = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(DiscoveryNodeUtils.create("n1")).add(DiscoveryNodeUtils.create("n2"))) .nodeFeatures( Map.of( "n1", @@ -143,8 +146,9 @@ public Set getFeatures() { // cluster doesn't have feature ClusterState stateNoFeature = ClusterState.builder(ClusterName.DEFAULT).metadata(Metadata.builder()).build(); + Settings settings = Settings.builder().put(DataStreamAutoShardingService.DATA_STREAMS_AUTO_SHARDING_ENABLED, true).build(); DataStreamAutoShardingService noFeatureService = new DataStreamAutoShardingService( - Settings.builder().put(DataStreamAutoShardingService.DATA_STREAMS_AUTO_SHARDING_ENABLED, true).build(), + settings, clusterService, new FeatureService(List.of()), () -> now @@ -155,15 +159,16 @@ public Set getFeatures() { } { + Settings settings = Settings.builder() + .put(DataStreamAutoShardingService.DATA_STREAMS_AUTO_SHARDING_ENABLED, true) + .putList( + DataStreamAutoShardingService.DATA_STREAMS_AUTO_SHARDING_EXCLUDES_SETTING.getKey(), + List.of("foo", dataStreamName + "*") + ) + .build(); // patterns are configured to exclude the current data stream DataStreamAutoShardingService noFeatureService = new DataStreamAutoShardingService( - Settings.builder() - .put(DataStreamAutoShardingService.DATA_STREAMS_AUTO_SHARDING_ENABLED, true) - .putList( - 
DataStreamAutoShardingService.DATA_STREAMS_AUTO_SHARDING_EXCLUDES_SETTING.getKey(), - List.of("foo", dataStreamName + "*") - ) - .build(), + settings, clusterService, new FeatureService(List.of()), () -> now @@ -199,6 +204,7 @@ public void testCalculateIncreaseShardingRecommendations() { DataStream dataStream = dataStreamSupplier.apply(null); builder.put(dataStream); ClusterState state = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(DiscoveryNodeUtils.create("n1")).add(DiscoveryNodeUtils.create("n2"))) .nodeFeatures( Map.of( "n1", @@ -237,6 +243,7 @@ public void testCalculateIncreaseShardingRecommendations() { ); builder.put(dataStream); ClusterState state = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(DiscoveryNodeUtils.create("n1")).add(DiscoveryNodeUtils.create("n2"))) .nodeFeatures( Map.of( "n1", @@ -275,6 +282,7 @@ public void testCalculateIncreaseShardingRecommendations() { ); builder.put(dataStream); ClusterState state = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(DiscoveryNodeUtils.create("n1")).add(DiscoveryNodeUtils.create("n2"))) .nodeFeatures( Map.of( "n1", @@ -313,6 +321,7 @@ public void testCalculateDecreaseShardingRecommendations() { DataStream dataStream = dataStreamSupplier.apply(null); builder.put(dataStream); ClusterState state = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(DiscoveryNodeUtils.create("n1")).add(DiscoveryNodeUtils.create("n2"))) .nodeFeatures( Map.of( "n1", @@ -353,6 +362,7 @@ public void testCalculateDecreaseShardingRecommendations() { DataStream dataStream = dataStreamSupplier.apply(null); builder.put(dataStream); ClusterState state = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(DiscoveryNodeUtils.create("n1")).add(DiscoveryNodeUtils.create("n2"))) .nodeFeatures( Map.of( "n1", @@ -401,6 +411,7 @@ public void testCalculateDecreaseShardingRecommendations() { ); builder.put(dataStream); ClusterState state = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(DiscoveryNodeUtils.create("n1")).add(DiscoveryNodeUtils.create("n2"))) .nodeFeatures( Map.of( "n1", @@ -447,6 +458,7 @@ public void testCalculateDecreaseShardingRecommendations() { ); builder.put(dataStream); ClusterState state = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(DiscoveryNodeUtils.create("n1")).add(DiscoveryNodeUtils.create("n2"))) .nodeFeatures( Map.of( "n1", @@ -487,6 +499,7 @@ public void testCalculateDecreaseShardingRecommendations() { DataStream dataStream = dataStreamSupplier.apply(null); builder.put(dataStream); ClusterState state = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(DiscoveryNodeUtils.create("n1")).add(DiscoveryNodeUtils.create("n2"))) .nodeFeatures( Map.of( "n1", diff --git a/server/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java b/server/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java index 668aea70c23f2..5f4426b02ce1a 100644 --- a/server/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java @@ -213,6 +213,7 @@ public void testToXContent() throws IOException { ], "version": "%s", "min_index_version":%s, + "min_read_only_index_version":%s, "max_index_version":%s } }, @@ -389,6 +390,7 @@ public void testToXContent() throws IOException { ephemeralId, Version.CURRENT, 
IndexVersions.MINIMUM_COMPATIBLE, + IndexVersions.MINIMUM_READONLY_COMPATIBLE, IndexVersion.current(), TransportVersion.current(), IndexVersion.current(), @@ -488,6 +490,7 @@ public void testToXContent_FlatSettingTrue_ReduceMappingFalse() throws IOExcepti ], "version" : "%s", "min_index_version" : %s, + "min_read_only_index_version" : %s, "max_index_version" : %s } }, @@ -663,6 +666,7 @@ public void testToXContent_FlatSettingTrue_ReduceMappingFalse() throws IOExcepti ephemeralId, Version.CURRENT, IndexVersions.MINIMUM_COMPATIBLE, + IndexVersions.MINIMUM_READONLY_COMPATIBLE, IndexVersion.current(), TransportVersion.current(), IndexVersion.current(), @@ -762,6 +766,7 @@ public void testToXContent_FlatSettingFalse_ReduceMappingTrue() throws IOExcepti ], "version" : "%s", "min_index_version" : %s, + "min_read_only_index_version" : %s, "max_index_version" : %s } }, @@ -943,6 +948,7 @@ public void testToXContent_FlatSettingFalse_ReduceMappingTrue() throws IOExcepti ephemeralId, Version.CURRENT, IndexVersions.MINIMUM_COMPATIBLE, + IndexVersions.MINIMUM_READONLY_COMPATIBLE, IndexVersion.current(), TransportVersion.current(), IndexVersion.current(), diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/NodeJoinExecutorTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/NodeJoinExecutorTests.java index 27775270a83eb..492a142492e18 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/NodeJoinExecutorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/NodeJoinExecutorTests.java @@ -33,6 +33,7 @@ import org.elasticsearch.common.ReferenceDocs; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.features.FeatureService; import org.elasticsearch.features.FeatureSpecification; import org.elasticsearch.features.NodeFeature; @@ -46,11 +47,13 @@ import org.elasticsearch.threadpool.ThreadPool; import java.util.Collections; +import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Stream; import static org.elasticsearch.cluster.metadata.DesiredNodesTestCase.assertDesiredNodesStatusIsCorrect; @@ -227,6 +230,227 @@ public Set getFeatures() { ); } + @SuppressForbidden(reason = "we need to actually check what is in cluster state") + private static Map> getRecordedNodeFeatures(ClusterState state) { + return state.clusterFeatures().nodeFeatures(); + } + + private static Version nextMajor() { + return Version.fromId((Version.CURRENT.major + 1) * 1_000_000 + 99); + } + + public void testCanJoinClusterWithAssumedFeatures() throws Exception { + AllocationService allocationService = createAllocationService(); + RerouteService rerouteService = (reason, priority, listener) -> listener.onResponse(null); + FeatureService featureService = new FeatureService(List.of(new FeatureSpecification() { + @Override + public Set getFeatures() { + return Set.of(new NodeFeature("f1"), new NodeFeature("af1", true), new NodeFeature("af2", true)); + } + })); + + NodeJoinExecutor executor = new NodeJoinExecutor(allocationService, rerouteService, featureService); + + DiscoveryNode masterNode = DiscoveryNodeUtils.create(UUIDs.base64UUID()); + DiscoveryNode otherNode = DiscoveryNodeUtils.create(UUIDs.base64UUID()); + Map> features = new HashMap<>(); + 
features.put(masterNode.getId(), Set.of("f1", "af1", "af2")); + features.put(otherNode.getId(), Set.of("f1", "af1", "af2")); + ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(masterNode).localNodeId(masterNode.getId()).masterNodeId(masterNode.getId()).add(otherNode)) + .nodeFeatures(features) + .build(); + + // it is valid for major+1 versions to join clusters with assumed features still present + // this can happen in the process of marking, then removing, assumed features + // they should still be recorded appropriately + DiscoveryNode newNode = DiscoveryNodeUtils.builder(UUIDs.base64UUID()) + .version(nextMajor(), IndexVersions.MINIMUM_COMPATIBLE, IndexVersion.current()) + .build(); + clusterState = ClusterStateTaskExecutorUtils.executeAndAssertSuccessful( + clusterState, + executor, + List.of( + JoinTask.singleNode( + newNode, + CompatibilityVersionsUtils.staticCurrent(), + Set.of("f1", "af2"), + TEST_REASON, + NO_FAILURE_LISTENER, + 0L + ) + ) + ); + features.put(newNode.getId(), Set.of("f1", "af2")); + + // extra final check that the recorded cluster features are as they should be + assertThat(getRecordedNodeFeatures(clusterState), equalTo(features)); + } + + public void testJoinClusterWithAssumedFeaturesDoesntAllowNonAssumed() throws Exception { + AllocationService allocationService = createAllocationService(); + RerouteService rerouteService = (reason, priority, listener) -> listener.onResponse(null); + FeatureService featureService = new FeatureService(List.of(new FeatureSpecification() { + @Override + public Set getFeatures() { + return Set.of(new NodeFeature("f1"), new NodeFeature("af1", true)); + } + })); + + NodeJoinExecutor executor = new NodeJoinExecutor(allocationService, rerouteService, featureService); + + DiscoveryNode masterNode = DiscoveryNodeUtils.create(UUIDs.base64UUID()); + DiscoveryNode otherNode = DiscoveryNodeUtils.create(UUIDs.base64UUID()); + Map> features = new HashMap<>(); + features.put(masterNode.getId(), Set.of("f1", "af1")); + features.put(otherNode.getId(), Set.of("f1", "af1")); + + ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(masterNode).localNodeId(masterNode.getId()).masterNodeId(masterNode.getId()).add(otherNode)) + .nodeFeatures(features) + .build(); + + DiscoveryNode newNodeNextMajor = DiscoveryNodeUtils.builder(UUIDs.base64UUID()) + .version(nextMajor(), IndexVersions.MINIMUM_COMPATIBLE, IndexVersion.current()) + .build(); + clusterState = ClusterStateTaskExecutorUtils.executeAndAssertSuccessful( + clusterState, + executor, + List.of( + JoinTask.singleNode( + newNodeNextMajor, + CompatibilityVersionsUtils.staticCurrent(), + Set.of("f1"), + TEST_REASON, + NO_FAILURE_LISTENER, + 0L + ) + ) + ); + features.put(newNodeNextMajor.getId(), Set.of("f1")); + + // even though a next major has joined without af1, this doesn't allow the current major to join with af1 missing + DiscoveryNode newNodeCurMajor = DiscoveryNodeUtils.create(UUIDs.base64UUID()); + AtomicReference ex = new AtomicReference<>(); + clusterState = ClusterStateTaskExecutorUtils.executeAndAssertSuccessful( + clusterState, + executor, + List.of( + JoinTask.singleNode( + newNodeCurMajor, + CompatibilityVersionsUtils.staticCurrent(), + Set.of("f1"), + TEST_REASON, + ActionTestUtils.assertNoSuccessListener(ex::set), + 0L + ) + ) + ); + assertThat(ex.get().getMessage(), containsString("missing required features [af1]")); + + // a next major can't join if it is missing non-assumed features + DiscoveryNode newNodeNextMajorMissing = DiscoveryNodeUtils.builder(UUIDs.base64UUID()) + .version(nextMajor(), IndexVersions.MINIMUM_COMPATIBLE, IndexVersion.current()) + .build(); + ex.set(null); + clusterState = ClusterStateTaskExecutorUtils.executeAndAssertSuccessful( + clusterState, + executor, + List.of( + JoinTask.singleNode( + newNodeNextMajorMissing, + CompatibilityVersionsUtils.staticCurrent(), + Set.of(), + TEST_REASON, + ActionTestUtils.assertNoSuccessListener(ex::set), + 0L + ) + ) + ); + assertThat(ex.get().getMessage(), containsString("missing required features [f1]")); + + // extra final check that the recorded cluster features are as they should be, and newNodeNextMajor hasn't gained af1 + assertThat(getRecordedNodeFeatures(clusterState), equalTo(features)); + } + + /* + * Same as above but the current major missing features is processed in the same execution + */ + public void testJoinClusterWithAssumedFeaturesDoesntAllowNonAssumedSameExecute() throws Exception { + AllocationService allocationService = createAllocationService(); + RerouteService rerouteService = (reason, priority, listener) -> listener.onResponse(null); + FeatureService featureService = new FeatureService(List.of(new FeatureSpecification() { + @Override + public Set getFeatures() { + return Set.of(new NodeFeature("f1"), new NodeFeature("af1", true)); + } + })); + + NodeJoinExecutor executor = new NodeJoinExecutor(allocationService, rerouteService, featureService); + + DiscoveryNode masterNode = DiscoveryNodeUtils.create(UUIDs.base64UUID()); + DiscoveryNode otherNode = DiscoveryNodeUtils.create(UUIDs.base64UUID()); + Map> features = new HashMap<>(); + features.put(masterNode.getId(), Set.of("f1", "af1")); + features.put(otherNode.getId(), Set.of("f1", "af1")); + + ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(masterNode).localNodeId(masterNode.getId()).masterNodeId(masterNode.getId()).add(otherNode)) + .nodeFeatures(features) + .build(); + + DiscoveryNode newNodeNextMajor = DiscoveryNodeUtils.builder(UUIDs.base64UUID()) + .version(nextMajor(), IndexVersions.MINIMUM_COMPATIBLE, IndexVersion.current()) + .build(); + DiscoveryNode newNodeCurMajor = DiscoveryNodeUtils.create(UUIDs.base64UUID()); + DiscoveryNode newNodeNextMajorMissing = DiscoveryNodeUtils.builder(UUIDs.base64UUID()) + .version(nextMajor(), IndexVersions.MINIMUM_COMPATIBLE, IndexVersion.current()) + .build(); + // even though a next major could join, this doesn't allow the current major to join with missing features + // nor a next major missing non-assumed features + AtomicReference thisMajorEx = new AtomicReference<>(); + AtomicReference nextMajorEx = new AtomicReference<>(); + List tasks = List.of( + JoinTask.singleNode( + newNodeNextMajor, + CompatibilityVersionsUtils.staticCurrent(), + Set.of("f1"), + TEST_REASON, + NO_FAILURE_LISTENER, + 0L + ), + JoinTask.singleNode( + newNodeCurMajor, + CompatibilityVersionsUtils.staticCurrent(), + Set.of("f1"), + TEST_REASON, + ActionTestUtils.assertNoSuccessListener(thisMajorEx::set), + 0L + ), + JoinTask.singleNode( + newNodeNextMajorMissing, + CompatibilityVersionsUtils.staticCurrent(), + Set.of(), + TEST_REASON, + ActionTestUtils.assertNoSuccessListener(nextMajorEx::set), + 0L + ) + ); + if (randomBoolean()) { + // sometimes combine them into a single task for completeness + tasks = List.of(new JoinTask(tasks.stream().flatMap(t -> t.nodeJoinTasks().stream()).toList(), false, 0L, null)); + } + + clusterState =
ClusterStateTaskExecutorUtils.executeAndAssertSuccessful(clusterState, executor, tasks); + features.put(newNodeNextMajor.getId(), Set.of("f1")); + + assertThat(thisMajorEx.get().getMessage(), containsString("missing required features [af1]")); + assertThat(nextMajorEx.get().getMessage(), containsString("missing required features [f1]")); + + // extra check that the recorded cluster features are as they should be, and newNodeNextMajor hasn't gained af1 + assertThat(getRecordedNodeFeatures(clusterState), equalTo(features)); + } + public void testSuccess() { Settings.builder().build(); Metadata.Builder metaBuilder = Metadata.builder(); @@ -921,8 +1145,8 @@ public void testSetsNodeFeaturesWhenRejoining() throws Exception { .nodeFeatures(Map.of(masterNode.getId(), Set.of("f1", "f2"), rejoinNode.getId(), Set.of())) .build(); - assertThat(clusterState.clusterFeatures().clusterHasFeature(new NodeFeature("f1")), is(false)); - assertThat(clusterState.clusterFeatures().clusterHasFeature(new NodeFeature("f2")), is(false)); + assertThat(clusterState.clusterFeatures().clusterHasFeature(clusterState.nodes(), new NodeFeature("f1")), is(false)); + assertThat(clusterState.clusterFeatures().clusterHasFeature(clusterState.nodes(), new NodeFeature("f2")), is(false)); final var resultingState = ClusterStateTaskExecutorUtils.executeAndAssertSuccessful( clusterState, @@ -939,8 +1163,8 @@ public void testSetsNodeFeaturesWhenRejoining() throws Exception { ) ); - assertThat(resultingState.clusterFeatures().clusterHasFeature(new NodeFeature("f1")), is(true)); - assertThat(resultingState.clusterFeatures().clusterHasFeature(new NodeFeature("f2")), is(true)); + assertThat(resultingState.clusterFeatures().clusterHasFeature(resultingState.nodes(), new NodeFeature("f1")), is(true)); + assertThat(resultingState.clusterFeatures().clusterHasFeature(resultingState.nodes(), new NodeFeature("f2")), is(true)); } private DesiredNodeWithStatus createActualizedDesiredNode() { diff --git a/server/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeTests.java b/server/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeTests.java index 331b5d92ca94e..fa7633f0eaf75 100644 --- a/server/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeTests.java @@ -31,6 +31,8 @@ import static java.util.Collections.emptySet; import static org.elasticsearch.test.NodeRoles.nonRemoteClusterClientNode; import static org.elasticsearch.test.NodeRoles.remoteClusterClientNode; +import static org.elasticsearch.test.TransportVersionUtils.getPreviousVersion; +import static org.elasticsearch.test.TransportVersionUtils.randomVersionBetween; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -221,6 +223,7 @@ public void testDiscoveryNodeToXContent() { ], "version" : "%s", "min_index_version" : %s, + "min_read_only_index_version" : %s, "max_index_version" : %s } }""", @@ -228,6 +231,7 @@ public void testDiscoveryNodeToXContent() { withExternalId ? 
"test-external-id" : "test-name", Version.CURRENT, IndexVersions.MINIMUM_COMPATIBLE, + IndexVersions.MINIMUM_READONLY_COMPATIBLE, IndexVersion.current() ) ) @@ -250,4 +254,61 @@ public void testDiscoveryNodeToString() { assertThat(toString, containsString("{" + node.getBuildVersion() + "}")); assertThat(toString, containsString("{test-attr=val}"));// attributes } + + public void testDiscoveryNodeMinReadOnlyVersionSerialization() throws Exception { + var node = DiscoveryNodeUtils.create("_id", buildNewFakeTransportAddress(), VersionInformation.CURRENT); + + { + try (var out = new BytesStreamOutput()) { + out.setTransportVersion(TransportVersion.current()); + node.writeTo(out); + + try (var in = StreamInput.wrap(out.bytes().array())) { + in.setTransportVersion(TransportVersion.current()); + + var deserialized = new DiscoveryNode(in); + assertThat(deserialized.getId(), equalTo(node.getId())); + assertThat(deserialized.getAddress(), equalTo(node.getAddress())); + assertThat(deserialized.getMinIndexVersion(), equalTo(node.getMinIndexVersion())); + assertThat(deserialized.getMaxIndexVersion(), equalTo(node.getMaxIndexVersion())); + assertThat(deserialized.getMinReadOnlyIndexVersion(), equalTo(node.getMinReadOnlyIndexVersion())); + assertThat(deserialized.getVersionInformation(), equalTo(node.getVersionInformation())); + } + } + } + + { + var oldVersion = randomVersionBetween( + random(), + TransportVersions.MINIMUM_COMPATIBLE, + getPreviousVersion(TransportVersions.NODE_VERSION_INFORMATION_WITH_MIN_READ_ONLY_INDEX_VERSION) + ); + try (var out = new BytesStreamOutput()) { + out.setTransportVersion(oldVersion); + node.writeTo(out); + + try (var in = StreamInput.wrap(out.bytes().array())) { + in.setTransportVersion(oldVersion); + + var deserialized = new DiscoveryNode(in); + assertThat(deserialized.getId(), equalTo(node.getId())); + assertThat(deserialized.getAddress(), equalTo(node.getAddress())); + assertThat(deserialized.getMinIndexVersion(), equalTo(node.getMinIndexVersion())); + assertThat(deserialized.getMaxIndexVersion(), equalTo(node.getMaxIndexVersion())); + assertThat(deserialized.getMinReadOnlyIndexVersion(), equalTo(node.getMinIndexVersion())); + assertThat( + deserialized.getVersionInformation(), + equalTo( + new VersionInformation( + node.getBuildVersion(), + node.getMinIndexVersion(), + node.getMinIndexVersion(), + node.getMaxIndexVersion() + ) + ) + ); + } + } + } + } } diff --git a/server/src/test/java/org/elasticsearch/features/FeatureServiceTests.java b/server/src/test/java/org/elasticsearch/features/FeatureServiceTests.java index 874a6a96313e4..a64303f376b20 100644 --- a/server/src/test/java/org/elasticsearch/features/FeatureServiceTests.java +++ b/server/src/test/java/org/elasticsearch/features/FeatureServiceTests.java @@ -9,8 +9,14 @@ package org.elasticsearch.features; +import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.node.DiscoveryNodeUtils; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.node.VersionInformation; +import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.IndexVersions; import org.elasticsearch.test.ESTestCase; import java.util.List; @@ -69,6 +75,12 @@ public void testStateHasFeatures() { ); ClusterState state = ClusterState.builder(ClusterName.DEFAULT) + .nodes( + DiscoveryNodes.builder() + .add(DiscoveryNodeUtils.create("node1")) + .add(DiscoveryNodeUtils.create("node2")) + 
.add(DiscoveryNodeUtils.create("node3")) + ) .nodeFeatures( Map.of("node1", Set.of("f1", "f2", "nf1"), "node2", Set.of("f1", "f2", "nf2"), "node3", Set.of("f1", "f2", "nf1")) ) @@ -81,4 +93,33 @@ public void testStateHasFeatures() { assertFalse(service.clusterHasFeature(state, new NodeFeature("nf2"))); assertFalse(service.clusterHasFeature(state, new NodeFeature("nf3"))); } + + private static Version nextMajor() { + return Version.fromId((Version.CURRENT.major + 1) * 1_000_000 + 99); + } + + public void testStateHasAssumedFeatures() { + List specs = List.of( + new TestFeatureSpecification(Set.of(new NodeFeature("f1"), new NodeFeature("f2"), new NodeFeature("af1", true))) + ); + + ClusterState state = ClusterState.builder(ClusterName.DEFAULT) + .nodes( + DiscoveryNodes.builder() + .add(DiscoveryNodeUtils.create("node1")) + .add(DiscoveryNodeUtils.create("node2")) + .add( + DiscoveryNodeUtils.builder("node3") + .version(new VersionInformation(nextMajor(), IndexVersions.MINIMUM_COMPATIBLE, IndexVersion.current())) + .build() + ) + ) + .nodeFeatures(Map.of("node1", Set.of("f1", "af1"), "node2", Set.of("f1", "f2", "af1"), "node3", Set.of("f1", "f2"))) + .build(); + + FeatureService service = new FeatureService(specs); + assertTrue(service.clusterHasFeature(state, new NodeFeature("f1"))); + assertFalse(service.clusterHasFeature(state, new NodeFeature("f2"))); + assertTrue(service.clusterHasFeature(state, new NodeFeature("af1", true))); + } } diff --git a/server/src/test/java/org/elasticsearch/health/node/selection/HealthNodeTaskExecutorTests.java b/server/src/test/java/org/elasticsearch/health/node/selection/HealthNodeTaskExecutorTests.java index 97f44f7480a72..92bfabf6f1972 100644 --- a/server/src/test/java/org/elasticsearch/health/node/selection/HealthNodeTaskExecutorTests.java +++ b/server/src/test/java/org/elasticsearch/health/node/selection/HealthNodeTaskExecutorTests.java @@ -77,8 +77,8 @@ public void setUp() throws Exception { clusterService = createClusterService(threadPool); localNodeId = clusterService.localNode().getId(); persistentTasksService = mock(PersistentTasksService.class); - featureService = new FeatureService(List.of(new HealthFeatures())); settings = Settings.builder().build(); + featureService = new FeatureService(List.of(new HealthFeatures())); clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); } diff --git a/server/src/test/java/org/elasticsearch/index/query/functionscore/ScoreFunctionBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/functionscore/ScoreFunctionBuilderTests.java index 8d060d94e4c21..b58ac513a6449 100644 --- a/server/src/test/java/org/elasticsearch/index/query/functionscore/ScoreFunctionBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/functionscore/ScoreFunctionBuilderTests.java @@ -66,7 +66,6 @@ public void testRandomScoreFunctionWithSeedNoField() throws Exception { Mockito.when(context.getFieldType(IdFieldMapper.NAME)).thenReturn(new KeywordFieldMapper.KeywordFieldType(IdFieldMapper.NAME)); Mockito.when(context.isFieldMapped(IdFieldMapper.NAME)).thenReturn(true); builder.toFunction(context); - assertWarnings("As of version 7.0 Elasticsearch will require that a [field] parameter is provided when a [seed] is set"); } public void testRandomScoreFunctionWithSeed() throws Exception { diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorTests.java 
b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorTests.java index 38294fb030ed4..bf26326abafbf 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorTests.java @@ -83,9 +83,9 @@ public class DateHistogramAggregatorTests extends DateHistogramAggregatorTestCas "2017-12-12T22:55:46" ); - public void testBooleanFieldDeprecated() throws IOException { + public void testBooleanFieldUnsupported() throws IOException { final String fieldName = "bogusBoolean"; - testCase(iw -> { + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> testCase(iw -> { Document d = new Document(); d.add(new SortedNumericDocValuesField(fieldName, 0)); iw.addDocument(d); @@ -95,8 +95,8 @@ public void testBooleanFieldDeprecated() throws IOException { new DateHistogramAggregationBuilder("name").calendarInterval(DateHistogramInterval.HOUR).field(fieldName), new BooleanFieldMapper.BooleanFieldType(fieldName) ) - ); - assertWarnings("Running DateHistogram aggregations on [boolean] fields is deprecated"); + )); + assertThat(e.getMessage(), equalTo("Field [bogusBoolean] of type [boolean] is not supported for aggregation [date_histogram]")); } public void testMatchNoDocs() throws IOException { diff --git a/test/fixtures/gcs-fixture/build.gradle b/test/fixtures/gcs-fixture/build.gradle index e8f1a2e15a4e0..6cf2e1ee52c2c 100644 --- a/test/fixtures/gcs-fixture/build.gradle +++ b/test/fixtures/gcs-fixture/build.gradle @@ -9,7 +9,6 @@ apply plugin: 'elasticsearch.java' description = 'Fixture for Google Cloud Storage service' -tasks.named("test").configure { enabled = false } dependencies { api project(':server') diff --git a/test/fixtures/gcs-fixture/src/main/java/fixture/gcs/GoogleCloudStorageHttpHandler.java b/test/fixtures/gcs-fixture/src/main/java/fixture/gcs/GoogleCloudStorageHttpHandler.java index f6b52a32a9a1d..163712fb05a50 100644 --- a/test/fixtures/gcs-fixture/src/main/java/fixture/gcs/GoogleCloudStorageHttpHandler.java +++ b/test/fixtures/gcs-fixture/src/main/java/fixture/gcs/GoogleCloudStorageHttpHandler.java @@ -95,7 +95,7 @@ public void handle(final HttpExchange exchange) throws IOException { } else if (Regex.simpleMatch("GET /storage/v1/b/" + bucket + "/o*", request)) { // List Objects https://cloud.google.com/storage/docs/json_api/v1/objects/list final Map params = new HashMap<>(); - RestUtils.decodeQueryString(exchange.getRequestURI().getQuery(), 0, params); + RestUtils.decodeQueryString(exchange.getRequestURI(), params); final String prefix = params.getOrDefault("prefix", ""); final String delimiter = params.get("delimiter"); @@ -212,7 +212,7 @@ public void handle(final HttpExchange exchange) throws IOException { } else if (Regex.simpleMatch("POST /upload/storage/v1/b/" + bucket + "/*uploadType=resumable*", request)) { // Resumable upload initialization https://cloud.google.com/storage/docs/json_api/v1/how-tos/resumable-upload final Map params = new HashMap<>(); - RestUtils.decodeQueryString(exchange.getRequestURI().getQuery(), 0, params); + RestUtils.decodeQueryString(exchange.getRequestURI(), params); final String blobName = params.get("name"); blobs.put(blobName, BytesArray.EMPTY); @@ -237,7 +237,7 @@ public void handle(final HttpExchange exchange) throws IOException { } else if (Regex.simpleMatch("PUT /upload/storage/v1/b/" + bucket + "/o?*uploadType=resumable*", 
request)) { // Resumable upload https://cloud.google.com/storage/docs/json_api/v1/how-tos/resumable-upload final Map params = new HashMap<>(); - RestUtils.decodeQueryString(exchange.getRequestURI().getQuery(), 0, params); + RestUtils.decodeQueryString(exchange.getRequestURI(), params); final String blobName = params.get("test_blob_name"); if (blobs.containsKey(blobName) == false) { @@ -269,8 +269,6 @@ public void handle(final HttpExchange exchange) throws IOException { exchange.sendResponseHeaders(RestStatus.NOT_FOUND.getStatus(), -1); } } finally { - int read = exchange.getRequestBody().read(); - assert read == -1 : "Request body should have been fully read here but saw [" + read + "]"; exchange.close(); } } diff --git a/test/fixtures/gcs-fixture/src/test/java/fixture/gcs/GoogleCloudStorageHttpHandlerTests.java b/test/fixtures/gcs-fixture/src/test/java/fixture/gcs/GoogleCloudStorageHttpHandlerTests.java new file mode 100644 index 0000000000000..0caaa983f76df --- /dev/null +++ b/test/fixtures/gcs-fixture/src/test/java/fixture/gcs/GoogleCloudStorageHttpHandlerTests.java @@ -0,0 +1,509 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package fixture.gcs; + +import com.sun.net.httpserver.Headers; +import com.sun.net.httpserver.HttpContext; +import com.sun.net.httpserver.HttpExchange; +import com.sun.net.httpserver.HttpPrincipal; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.bytes.CompositeBytesReference; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.test.ESTestCase; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.net.InetSocketAddress; +import java.net.URI; +import java.nio.charset.StandardCharsets; +import java.util.Arrays; +import java.util.List; +import java.util.Objects; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.zip.GZIPOutputStream; + +public class GoogleCloudStorageHttpHandlerTests extends ESTestCase { + + private static final String HOST = "http://127.0.0.1:12345"; + private static final int RESUME_INCOMPLETE = 308; + + public void testRejectsBadUri() { + assertEquals( + RestStatus.NOT_FOUND.getStatus(), + handleRequest(new GoogleCloudStorageHttpHandler("bucket"), randomFrom("GET", "PUT", "POST", "DELETE", "HEAD"), "/not-in-bucket") + .status() + ); + } + + public void testCheckEndpoint() { + final var handler = new GoogleCloudStorageHttpHandler("bucket"); + + assertEquals( + RestStatus.OK, + handleRequest(handler, "GET", "/", BytesArray.EMPTY, Headers.of("Metadata-Flavor", "Google")).restStatus() + ); + } + + public void testSimpleObjectOperations() { + final var bucket = randomAlphaOfLength(10); + final var handler = new GoogleCloudStorageHttpHandler(bucket); + + assertEquals(RestStatus.NOT_FOUND, handleRequest(handler, "GET", "/download/storage/v1/b/" + bucket + 
"/o/blob").restStatus()); + + assertEquals( + new TestHttpResponse(RestStatus.OK, "{\"kind\":\"storage#objects\",\"items\":[],\"prefixes\":[]}"), + handleRequest(handler, "GET", "/storage/v1/b/" + bucket + "/o") + ); + + // Multipart upload + final var body = randomAlphaOfLength(50); + assertEquals( + RestStatus.OK, + handleRequest( + handler, + "POST", + "/upload/storage/v1/b/" + bucket + "/?uploadType=multipart", + createGzipCompressedMultipartUploadBody(bucket, "path/blob", body) + ).restStatus() + ); + assertEquals( + new TestHttpResponse(RestStatus.OK, body), + handleRequest(handler, "GET", "/download/storage/v1/b/" + bucket + "/o/path/blob") + ); + + assertEquals(new TestHttpResponse(RestStatus.OK, Strings.format(""" + {"kind":"storage#objects","items":[{"kind":"storage#object","bucket":"%s","name":"path/blob","id":"path/blob","size":"50"} + ],"prefixes":[]}""", bucket)), handleRequest(handler, "GET", "/storage/v1/b/" + bucket + "/o")); + + assertEquals(new TestHttpResponse(RestStatus.OK, Strings.format(""" + {"kind":"storage#objects","items":[{"kind":"storage#object","bucket":"%s","name":"path/blob","id":"path/blob","size":"50"} + ],"prefixes":[]}""", bucket)), handleRequest(handler, "GET", "/storage/v1/b/" + bucket + "/o?prefix=path/")); + + assertEquals( + new TestHttpResponse(RestStatus.OK, """ + {"kind":"storage#objects","items":[],"prefixes":[]}"""), + handleRequest(handler, "GET", "/storage/v1/b/" + bucket + "/o?prefix=path/other") + ); + + assertEquals( + new TestHttpResponse(RestStatus.OK, """ + --__END_OF_PART__d8b50acb-87dc-4630-a3d3-17d187132ebc__ + Content-Length: 162 + Content-Type: application/http + content-id: 1 + content-transfer-encoding: binary + + HTTP/1.1 204 NO_CONTENT + + + + + --__END_OF_PART__d8b50acb-87dc-4630-a3d3-17d187132ebc__ + """.replaceAll("\n", "\r\n")), + handleRequest( + handler, + "POST", + "/batch/storage/v1", + createBatchDeleteRequest(bucket, "path/blob"), + Headers.of("Content-Type", "mixed/multipart") + ) + ); + assertEquals( + RestStatus.OK, + handleRequest( + handler, + "POST", + "/batch/storage/v1", + createBatchDeleteRequest(bucket, "path/blob"), + Headers.of("Content-Type", "mixed/multipart") + ).restStatus() + ); + + assertEquals( + new TestHttpResponse(RestStatus.OK, """ + {"kind":"storage#objects","items":[],"prefixes":[]}"""), + handleRequest(handler, "GET", "/storage/v1/b/" + bucket + "/o?prefix=path/") + ); + } + + public void testGetWithBytesRange() { + final var bucket = randomIdentifier(); + final var handler = new GoogleCloudStorageHttpHandler(bucket); + final var blobName = "blob_name_" + randomIdentifier(); + final var blobPath = "/download/storage/v1/b/" + bucket + "/o/" + blobName; + final var blobBytes = randomBytesReference(256); + + assertEquals( + RestStatus.OK, + handleRequest( + handler, + "POST", + "/upload/storage/v1/b/" + bucket + "/?uploadType=multipart", + createGzipCompressedMultipartUploadBody(bucket, blobName, blobBytes) + ).restStatus() + ); + + assertEquals( + "No Range", + new TestHttpResponse(RestStatus.OK, blobBytes, TestHttpExchange.EMPTY_HEADERS), + handleRequest(handler, "GET", blobPath) + ); + + var end = blobBytes.length() - 1; + assertEquals( + "Exact Range: bytes=0-" + end, + new TestHttpResponse(RestStatus.OK, blobBytes, TestHttpExchange.EMPTY_HEADERS), + handleRequest(handler, "GET", blobPath, BytesArray.EMPTY, rangeHeader(0, end)) + ); + + end = randomIntBetween(blobBytes.length() - 1, Integer.MAX_VALUE); + assertEquals( + "Larger Range: bytes=0-" + end, + new TestHttpResponse(RestStatus.OK, 
blobBytes, TestHttpExchange.EMPTY_HEADERS), + handleRequest(handler, "GET", blobPath, BytesArray.EMPTY, rangeHeader(0, end)) + ); + + var start = randomIntBetween(blobBytes.length(), Integer.MAX_VALUE - 1); + end = randomIntBetween(start, Integer.MAX_VALUE); + assertEquals( + "Invalid Range: bytes=" + start + '-' + end, + new TestHttpResponse(RestStatus.REQUESTED_RANGE_NOT_SATISFIED, BytesArray.EMPTY, TestHttpExchange.EMPTY_HEADERS), + handleRequest(handler, "GET", blobPath, BytesArray.EMPTY, rangeHeader(start, end)) + ); + + start = randomIntBetween(0, blobBytes.length() - 1); + var length = randomIntBetween(1, blobBytes.length() - start); + end = start + length - 1; + assertEquals( + "Range: bytes=" + start + '-' + end, + new TestHttpResponse(RestStatus.OK, blobBytes.slice(start, length), TestHttpExchange.EMPTY_HEADERS), + handleRequest(handler, "GET", blobPath, BytesArray.EMPTY, rangeHeader(start, end)) + ); + } + + public void testResumableUpload() { + final var bucket = randomIdentifier(); + final var handler = new GoogleCloudStorageHttpHandler(bucket); + final var blobName = "blob_name_" + randomIdentifier(); + + final var createUploadResponse = handleRequest( + handler, + "POST", + "/upload/storage/v1/b/" + bucket + "/?uploadType=resumable&name=" + blobName + ); + final var locationHeader = createUploadResponse.headers.getFirst("Location"); + final var sessionURI = locationHeader.substring(locationHeader.indexOf(HOST) + HOST.length()); + assertEquals(RestStatus.OK, createUploadResponse.restStatus()); + + final var part1 = randomAlphaOfLength(50); + final var uploadPart1Response = handleRequest(handler, "PUT", sessionURI, part1, contentRangeHeader(0, 50, null)); + assertEquals(new TestHttpResponse(RESUME_INCOMPLETE, rangeHeader(0, 50)), uploadPart1Response); + + assertEquals( + new TestHttpResponse(RESUME_INCOMPLETE, TestHttpExchange.EMPTY_HEADERS), + handleRequest(handler, "PUT", sessionURI, BytesArray.EMPTY, contentRangeHeader(null, null, null)) + ); + + final var part2 = randomAlphaOfLength(50); + final var uploadPart2Response = handleRequest(handler, "PUT", sessionURI, part2, contentRangeHeader(51, 100, null)); + assertEquals(new TestHttpResponse(RESUME_INCOMPLETE, rangeHeader(51, 100)), uploadPart2Response); + + final var part3 = randomAlphaOfLength(30); + final var uploadPart3Response = handleRequest(handler, "PUT", sessionURI, part3, contentRangeHeader(101, 130, 130)); + assertEquals(new TestHttpResponse(RestStatus.OK, TestHttpExchange.EMPTY_HEADERS), uploadPart3Response); + + // complete upload should be visible now + + // can download contents + assertEquals( + new TestHttpResponse(RestStatus.OK, part1 + part2 + part3), + handleRequest(handler, "GET", "/download/storage/v1/b/" + bucket + "/o/" + blobName) + ); + + // can see in listing + assertEquals(new TestHttpResponse(RestStatus.OK, Strings.format(""" + {"kind":"storage#objects","items":[{"kind":"storage#object","bucket":"%s","name":"%s","id":"%s","size":"130"} + ],"prefixes":[]}""", bucket, blobName, blobName)), handleRequest(handler, "GET", "/storage/v1/b/" + bucket + "/o")); + + // can get metadata + assertEquals(new TestHttpResponse(RestStatus.OK, Strings.format(""" + {"kind":"storage#object","bucket":"%s","name":"%s","id":"%s","size":"130"} + """, bucket, blobName, blobName)), handleRequest(handler, "GET", "/storage/v1/b/" + bucket + "/o/" + blobName)); + } + + private record TestHttpResponse(int status, BytesReference body, Headers headers) { + TestHttpResponse(RestStatus status, BytesReference body, Headers 
headers) { + this(status.getStatus(), body, headers); + } + + TestHttpResponse(RestStatus status, String body) { + this(status.getStatus(), new BytesArray(body.getBytes(StandardCharsets.UTF_8)), TestHttpExchange.EMPTY_HEADERS); + } + + TestHttpResponse(RestStatus status, Headers headers) { + this(status.getStatus(), BytesArray.EMPTY, headers); + } + + TestHttpResponse(int statusCode, Headers headers) { + this(statusCode, BytesArray.EMPTY, headers); + } + + RestStatus restStatus() { + return Objects.requireNonNull(RestStatus.fromCode(status)); + } + + @Override + public String toString() { + return "TestHttpResponse{" + "status=" + status + ", body={size=" + body.utf8ToString() + "}, headers=" + headers + '}'; + } + } + + private static TestHttpResponse handleRequest(GoogleCloudStorageHttpHandler handler, String method, String uri) { + return handleRequest(handler, method, uri, ""); + } + + private static TestHttpResponse handleRequest(GoogleCloudStorageHttpHandler handler, String method, String uri, String requestBody) { + return handleRequest(handler, method, uri, new BytesArray(requestBody.getBytes(StandardCharsets.UTF_8))); + } + + private static TestHttpResponse handleRequest( + GoogleCloudStorageHttpHandler handler, + String method, + String uri, + String requestBody, + Headers headers + ) { + return handleRequest(handler, method, uri, new BytesArray(requestBody.getBytes(StandardCharsets.UTF_8)), headers); + } + + private static TestHttpResponse handleRequest( + GoogleCloudStorageHttpHandler handler, + String method, + String uri, + BytesReference requestBody + ) { + return handleRequest(handler, method, uri, requestBody, TestHttpExchange.EMPTY_HEADERS); + } + + private static TestHttpResponse handleRequest( + GoogleCloudStorageHttpHandler handler, + String method, + String uri, + BytesReference requestBody, + Headers requestHeaders + ) { + final var httpExchange = new TestHttpExchange(method, uri, requestBody, requestHeaders); + try { + handler.handle(httpExchange); + } catch (IOException e) { + fail(e); + } + assertNotEquals(0, httpExchange.getResponseCode()); + var responseHeaders = new Headers(); + httpExchange.getResponseHeaders().forEach((header, values) -> { + // com.sun.net.httpserver.Headers.Headers() normalize keys + if ("Range".equals(header) || "Content-range".equals(header) || "Location".equals(header)) { + responseHeaders.put(header, List.copyOf(values)); + } + }); + return new TestHttpResponse(httpExchange.getResponseCode(), httpExchange.getResponseBodyContents(), responseHeaders); + } + + private static Headers contentRangeHeader(@Nullable Integer startInclusive, @Nullable Integer endInclusive, @Nullable Integer limit) { + final String rangeString = startInclusive != null && endInclusive != null ? startInclusive + "-" + endInclusive : "*"; + final String limitString = limit == null ? 
"*" : limit.toString(); + return Headers.of("Content-Range", "bytes " + rangeString + "/" + limitString); + } + + private static Headers rangeHeader(long start, long end) { + return Headers.of("Range", Strings.format("bytes=%d-%d", start, end)); + } + + private static BytesReference createGzipCompressedMultipartUploadBody(String bucketName, String path, String content) { + return createGzipCompressedMultipartUploadBody(bucketName, path, new BytesArray(content.getBytes(StandardCharsets.UTF_8))); + } + + private static BytesReference createGzipCompressedMultipartUploadBody(String bucketName, String path, BytesReference content) { + final String metadataString = Strings.format("{\"bucket\":\"%s\", \"name\":\"%s\"}", bucketName, path); + final BytesReference header = new BytesArray(Strings.format(""" + --__END_OF_PART__a607a67c-6df7-4b87-b8a1-81f639a75a97__ + Content-Length: %d + Content-Type: application/json; charset=UTF-8 + content-transfer-encoding: binary + + %s + --__END_OF_PART__a607a67c-6df7-4b87-b8a1-81f639a75a97__ + Content-Type: application/octet-stream + content-transfer-encoding: binary + + """.replaceAll("\n", "\r\n"), metadataString.length(), metadataString).getBytes(StandardCharsets.UTF_8)); + + final BytesReference footer = new BytesArray(""" + + --__END_OF_PART__a607a67c-6df7-4b87-b8a1-81f639a75a97__-- + """.replaceAll("\n", "\r\n")); + final ByteArrayOutputStream out = new ByteArrayOutputStream(); + try (GZIPOutputStream gzipOutputStream = new GZIPOutputStream(out)) { + gzipOutputStream.write(BytesReference.toBytes(CompositeBytesReference.of(header, content, footer))); + } catch (IOException e) { + fail(e); + } + return new BytesArray(out.toByteArray()); + } + + private static String createBatchDeleteRequest(String bucketName, String... 
paths) { + final String deleteRequestTemplate = """ + DELETE %s/storage/v1/b/%s/o/%s HTTP/1.1 + Authorization: Bearer foo + x-goog-api-client: gl-java/23.0.0 gdcl/2.1.1 mac-os-x/15.2 + + + """; + final String partTemplate = """ + --__END_OF_PART__d8b50acb-87dc-4630-a3d3-17d187132ebc__ + Content-Length: %d + Content-Type: application/http + content-id: %d + content-transfer-encoding: binary + + %s + """; + StringBuilder builder = new StringBuilder(); + AtomicInteger contentId = new AtomicInteger(); + Arrays.stream(paths).forEach(p -> { + final String deleteRequest = Strings.format(deleteRequestTemplate, HOST, bucketName, p); + final String part = Strings.format(partTemplate, deleteRequest.length(), contentId.incrementAndGet(), deleteRequest); + builder.append(part); + }); + builder.append("--__END_OF_PART__d8b50acb-87dc-4630-a3d3-17d187132ebc__"); + return builder.toString(); + } + + private static class TestHttpExchange extends HttpExchange { + + private static final Headers EMPTY_HEADERS = new Headers(); + + private final String method; + private final URI uri; + private final BytesReference requestBody; + private final Headers requestHeaders; + + private final Headers responseHeaders = new Headers(); + private final BytesStreamOutput responseBody = new BytesStreamOutput(); + private int responseCode; + + TestHttpExchange(String method, String uri, BytesReference requestBody, Headers requestHeaders) { + this.method = method; + this.uri = URI.create(uri); + this.requestBody = requestBody; + this.requestHeaders = new Headers(requestHeaders); + this.requestHeaders.add("Host", HOST); + } + + @Override + public Headers getRequestHeaders() { + return requestHeaders; + } + + @Override + public Headers getResponseHeaders() { + return responseHeaders; + } + + @Override + public URI getRequestURI() { + return uri; + } + + @Override + public String getRequestMethod() { + return method; + } + + @Override + public HttpContext getHttpContext() { + return null; + } + + @Override + public void close() {} + + @Override + public InputStream getRequestBody() { + try { + return requestBody.streamInput(); + } catch (IOException e) { + throw new AssertionError(e); + } + } + + @Override + public OutputStream getResponseBody() { + return responseBody; + } + + @Override + public void sendResponseHeaders(int rCode, long responseLength) { + this.responseCode = rCode; + } + + @Override + public InetSocketAddress getRemoteAddress() { + return null; + } + + @Override + public int getResponseCode() { + return responseCode; + } + + public BytesReference getResponseBodyContents() { + return responseBody.bytes(); + } + + @Override + public InetSocketAddress getLocalAddress() { + return null; + } + + @Override + public String getProtocol() { + return "HTTP/1.1"; + } + + @Override + public Object getAttribute(String name) { + return null; + } + + @Override + public void setAttribute(String name, Object value) { + fail("setAttribute not implemented"); + } + + @Override + public void setStreams(InputStream i, OutputStream o) { + fail("setStreams not implemented"); + } + + @Override + public HttpPrincipal getPrincipal() { + fail("getPrincipal not implemented"); + throw new UnsupportedOperationException("getPrincipal not implemented"); + } + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodeUtils.java b/test/framework/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodeUtils.java index 64f8fa88762b8..20368753eac1d 100644 --- 
a/test/framework/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodeUtils.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodeUtils.java @@ -76,6 +76,7 @@ public static class Builder { private BuildVersion buildVersion; private Version version; private IndexVersion minIndexVersion; + private IndexVersion minReadOnlyIndexVersion; private IndexVersion maxIndexVersion; private String externalId; @@ -125,16 +126,23 @@ public Builder version(Version version, IndexVersion minIndexVersion, IndexVersi this.buildVersion = BuildVersion.fromVersionId(version.id()); this.version = version; this.minIndexVersion = minIndexVersion; + this.minReadOnlyIndexVersion = minIndexVersion; this.maxIndexVersion = maxIndexVersion; return this; } - public Builder version(BuildVersion version, IndexVersion minIndexVersion, IndexVersion maxIndexVersion) { + public Builder version( + BuildVersion version, + IndexVersion minIndexVersion, + IndexVersion minReadOnlyIndexVersion, + IndexVersion maxIndexVersion + ) { // see comment in VersionInformation assert version.equals(BuildVersion.current()); this.buildVersion = version; this.version = Version.CURRENT; this.minIndexVersion = minIndexVersion; + this.minReadOnlyIndexVersion = minReadOnlyIndexVersion; this.maxIndexVersion = maxIndexVersion; return this; } @@ -143,6 +151,7 @@ public Builder version(VersionInformation versions) { this.buildVersion = versions.buildVersion(); this.version = versions.nodeVersion(); this.minIndexVersion = versions.minIndexVersion(); + this.minReadOnlyIndexVersion = versions.minReadOnlyIndexVersion(); this.maxIndexVersion = versions.maxIndexVersion(); return this; } @@ -170,10 +179,10 @@ public DiscoveryNode build() { } VersionInformation versionInfo; - if (minIndexVersion == null || maxIndexVersion == null) { + if (minIndexVersion == null || minReadOnlyIndexVersion == null || maxIndexVersion == null) { versionInfo = VersionInformation.inferVersions(version); } else { - versionInfo = new VersionInformation(buildVersion, version, minIndexVersion, maxIndexVersion); + versionInfo = new VersionInformation(buildVersion, version, minIndexVersion, minReadOnlyIndexVersion, maxIndexVersion); } return new DiscoveryNode(name, id, ephemeralId, hostName, hostAddress, address, attributes, roles, versionInfo, externalId); diff --git a/x-pack/plugin/build.gradle b/x-pack/plugin/build.gradle index fb37fb3575551..aa6e8de4ec27c 100644 --- a/x-pack/plugin/build.gradle +++ b/x-pack/plugin/build.gradle @@ -94,5 +94,6 @@ tasks.named("yamlRestCompatTestTransform").configure({ task -> task.skipTest("privileges/11_builtin/Test get builtin privileges" ,"unnecessary to test compatibility") task.skipTest("esql/61_enrich_ip/Invalid IP strings", "We switched from exceptions to null+warnings for ENRICH runtime errors") task.skipTest("esql/180_match_operator/match with non text field", "Match operator can now be used on non-text fields") + task.skipTest("esql/180_match_operator/match with functions", "Error message changed") }) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/search/SparseVectorQueryBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/search/SparseVectorQueryBuilder.java index e9e4e90421adc..35cba890e5e0c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/search/SparseVectorQueryBuilder.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/search/SparseVectorQueryBuilder.java @@ -90,7 +90,8 @@ public 
SparseVectorQueryBuilder( : (this.shouldPruneTokens ? new TokenPruningConfig() : null)); this.weightedTokensSupplier = null; - if (queryVectors == null ^ inferenceId == null == false) { + // Preserve BWC error messaging + if (queryVectors != null && inferenceId != null) { throw new IllegalArgumentException( "[" + NAME @@ -98,18 +99,24 @@ public SparseVectorQueryBuilder( + QUERY_VECTOR_FIELD.getPreferredName() + "] or [" + INFERENCE_ID_FIELD.getPreferredName() - + "]" + + "] for " + + ALLOWED_FIELD_TYPE + + " fields" ); } - if (inferenceId != null && query == null) { + + // Preserve BWC error messaging + if ((queryVectors == null) == (query == null)) { throw new IllegalArgumentException( "[" + NAME - + "] requires [" - + QUERY_FIELD.getPreferredName() - + "] when [" + + "] requires one of [" + + QUERY_VECTOR_FIELD.getPreferredName() + + "] or [" + INFERENCE_ID_FIELD.getPreferredName() - + "] is specified" + + "] for " + + ALLOWED_FIELD_TYPE + + " fields" ); } } @@ -143,6 +150,14 @@ public List<WeightedToken> getQueryVectors() { return queryVectors; } + public String getInferenceId() { + return inferenceId; + } + + public String getQuery() { + return query; + } + public boolean shouldPruneTokens() { return shouldPruneTokens; } @@ -176,7 +191,9 @@ protected void doXContent(XContentBuilder builder, Params params) throws IOExcep } builder.endObject(); } else { - builder.field(INFERENCE_ID_FIELD.getPreferredName(), inferenceId); + if (inferenceId != null) { + builder.field(INFERENCE_ID_FIELD.getPreferredName(), inferenceId); + } builder.field(QUERY_FIELD.getPreferredName(), query); } builder.field(PRUNE_FIELD.getPreferredName(), shouldPruneTokens); @@ -228,6 +245,11 @@ protected QueryBuilder doRewrite(QueryRewriteContext queryRewriteContext) { shouldPruneTokens, tokenPruningConfig ); + } else if (inferenceId == null) { + // Edge case, where inference_id was not specified in the request, + // but we did not intercept this and rewrite to a query on a field with + // pre-configured inference. So we trap here and output a nicer error message.
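// For orientation: after the two constructor checks above, the only reachable
// states here are (a) query_vector set, with query and inference_id both null,
// where the supplied token weights are used directly, and (b) query set, with
// inference_id either given explicitly or expected to have been filled in by an
// earlier interception/rewrite against a field with a preconfigured inference
// endpoint. Landing in this branch with inferenceId == null means that rewrite
// never happened, so the clearest remaining option is to fail fast: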
+ throw new IllegalArgumentException("inference_id required to perform vector search on query string"); } // TODO move this to xpack core and use inference APIs diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/search/SparseVectorQueryBuilderTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/search/SparseVectorQueryBuilderTests.java index a5c1ba45d90b7..af557ed6b7f82 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/search/SparseVectorQueryBuilderTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/search/SparseVectorQueryBuilderTests.java @@ -260,16 +260,16 @@ public void testIllegalValues() { { IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> new SparseVectorQueryBuilder("field name", null, "model id") + () -> new SparseVectorQueryBuilder("field name", null, null) ); - assertEquals("[sparse_vector] requires one of [query_vector] or [inference_id]", e.getMessage()); + assertEquals("[sparse_vector] requires one of [query_vector] or [inference_id] for sparse_vector fields", e.getMessage()); } { IllegalArgumentException e = expectThrows( IllegalArgumentException.class, () -> new SparseVectorQueryBuilder("field name", "model text", null) ); - assertEquals("[sparse_vector] requires [query] when [inference_id] is specified", e.getMessage()); + assertEquals("[sparse_vector] requires one of [query_vector] or [inference_id] for sparse_vector fields", e.getMessage()); } } diff --git a/x-pack/plugin/core/template-resources/src/main/resources/ml/anomalydetection/results_index_mappings.json b/x-pack/plugin/core/template-resources/src/main/resources/ml/anomalydetection/results_index_mappings.json index 4415afe50a998..e0bde4715839e 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/ml/anomalydetection/results_index_mappings.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/ml/anomalydetection/results_index_mappings.json @@ -5,7 +5,15 @@ }, "dynamic_templates" : [ { - "strings_as_keywords" : { + "map_objects": { + "match_mapping_type": "object", + "mapping": { + "type": "object" + } + } + }, + { + "non_objects_as_keywords" : { "match" : "*", "mapping" : { "type" : "keyword" diff --git a/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/BaseEqlSpecTestCase.java b/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/BaseEqlSpecTestCase.java index 90244d9b2c019..3557114e2f4c7 100644 --- a/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/BaseEqlSpecTestCase.java +++ b/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/BaseEqlSpecTestCase.java @@ -33,6 +33,9 @@ import java.util.function.Function; import java.util.stream.Collectors; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.is; + public abstract class BaseEqlSpecTestCase extends RemoteClusterAwareEqlRestTestCase { protected static final String PARAM_FORMATTING = "%2$s"; @@ -52,6 +55,9 @@ public abstract class BaseEqlSpecTestCase extends RemoteClusterAwareEqlRestTestC */ private final int size; private final int maxSamplesPerKey; + private final Boolean allowPartialSearchResults; + private final Boolean allowPartialSequenceResults; + private final Boolean expectShardFailures; @Before public void setup() throws Exception { @@ -104,7 +110,16 @@ protected static List asArray(List specs) { } results.add( - new Object[] { spec.query(), name, spec.expectedEventIds(), 
spec.joinKeys(), spec.size(), spec.maxSamplesPerKey() } + new Object[] { + spec.query(), + name, + spec.expectedEventIds(), + spec.joinKeys(), + spec.size(), + spec.maxSamplesPerKey(), + spec.allowPartialSearchResults(), + spec.allowPartialSequenceResults(), + spec.expectShardFailures() } ); } @@ -118,7 +133,10 @@ protected static List<Object[]> asArray(List<EqlSpec> specs) { List<String> eventIds, String[] joinKeys, Integer size, - Integer maxSamplesPerKey + Integer maxSamplesPerKey, + Boolean allowPartialSearchResults, + Boolean allowPartialSequenceResults, + Boolean expectShardFailures ) { this.index = index; @@ -128,6 +146,9 @@ protected static List<Object[]> asArray(List<EqlSpec> specs) { this.joinKeys = joinKeys; this.size = size == null ? -1 : size; this.maxSamplesPerKey = maxSamplesPerKey == null ? -1 : maxSamplesPerKey; + this.allowPartialSearchResults = allowPartialSearchResults; + this.allowPartialSequenceResults = allowPartialSequenceResults; + this.expectShardFailures = expectShardFailures; } public void test() throws Exception { @@ -137,6 +158,7 @@ public void test() throws Exception { private void assertResponse(ObjectPath response) throws Exception { List<Map<String, Object>> events = response.evaluate("hits.events"); List<Map<String, Object>> sequences = response.evaluate("hits.sequences"); + Object shardFailures = response.evaluate("shard_failures"); if (events != null) { assertEvents(events); @@ -145,6 +167,7 @@ private void assertResponse(ObjectPath response) throws Exception { } else { fail("No events or sequences found"); } + assertShardFailures(shardFailures); } protected ObjectPath runQuery(String index, String query) throws Exception { @@ -163,6 +186,32 @@ protected ObjectPath runQuery(String index, String query) throws Exception { if (maxSamplesPerKey > 0) { builder.field("max_samples_per_key", maxSamplesPerKey); } + boolean allowPartialResultsInBody = randomBoolean(); + if (allowPartialSearchResults != null) { + if (allowPartialResultsInBody) { + builder.field("allow_partial_search_results", String.valueOf(allowPartialSearchResults)); + if (allowPartialSequenceResults != null) { + builder.field("allow_partial_sequence_results", String.valueOf(allowPartialSequenceResults)); + } + } else { + // these will be overwritten by the path params, that have higher priority than the query (JSON body) params + if (allowPartialSearchResults != null) { + builder.field("allow_partial_search_results", randomBoolean()); + } + if (allowPartialSequenceResults != null) { + builder.field("allow_partial_sequence_results", randomBoolean()); + } + } + } else { + // Tests that don't specify a setting for these parameters should always pass. + // These params should be irrelevant.
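// Each flag is then set (or omitted) independently at random, so repeated runs
// cover the default, explicit-true and explicit-false combinations while the
// expected results stay the same.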
+ if (randomBoolean()) { + builder.field("allow_partial_search_results", randomBoolean()); + } + if (randomBoolean()) { + builder.field("allow_partial_sequence_results", randomBoolean()); + } + } builder.endObject(); Request request = new Request("POST", "/" + index + "/_eql/search"); @@ -170,6 +219,23 @@ protected ObjectPath runQuery(String index, String query) throws Exception { if (ccsMinimizeRoundtrips != null) { request.addParameter("ccs_minimize_roundtrips", ccsMinimizeRoundtrips.toString()); } + if (allowPartialSearchResults != null) { + if (allowPartialResultsInBody == false) { + request.addParameter("allow_partial_search_results", String.valueOf(allowPartialSearchResults)); + if (allowPartialSequenceResults != null) { + request.addParameter("allow_partial_sequence_results", String.valueOf(allowPartialSequenceResults)); + } + } + } else { + // Tests that don't specify a setting for these parameters should always pass. + // These params should be irrelevant. + if (randomBoolean()) { + request.addParameter("allow_partial_search_results", String.valueOf(randomBoolean())); + } + if (randomBoolean()) { + request.addParameter("allow_partial_sequence_results", String.valueOf(randomBoolean())); + } + } int timeout = Math.toIntExact(timeout().millis()); RequestConfig config = RequestConfig.copy(RequestConfig.DEFAULT) .setConnectionRequestTimeout(timeout) @@ -182,6 +248,20 @@ protected ObjectPath runQuery(String index, String query) throws Exception { return ObjectPath.createFromResponse(client().performRequest(request)); } + private void assertShardFailures(Object shardFailures) { + if (expectShardFailures != null) { + if (expectShardFailures) { + assertNotNull(shardFailures); + List list = (List) shardFailures; + assertThat(list.size(), is(greaterThan(0))); + } else { + assertNull(shardFailures); + } + } else { + assertNull(shardFailures); + } + } + private void assertEvents(List> events) { assertNotNull(events); logger.debug("Events {}", new Object() { diff --git a/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/DataLoader.java b/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/DataLoader.java index 1d51af574c810..4618bd8f4ff3d 100644 --- a/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/DataLoader.java +++ b/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/DataLoader.java @@ -52,6 +52,7 @@ */ public class DataLoader { public static final String TEST_INDEX = "endgame-140"; + public static final String TEST_SHARD_FAILURES_INDEX = "endgame-shard-failures"; public static final String TEST_EXTRA_INDEX = "extra"; public static final String TEST_NANOS_INDEX = "endgame-140-nanos"; public static final String TEST_SAMPLE = "sample1,sample2,sample3"; @@ -103,6 +104,11 @@ public static void loadDatasetIntoEs(RestClient client, CheckedBiFunction eventIds, String[] joinKeys, Integer size, - Integer maxSamplesPerKey + Integer maxSamplesPerKey, + Boolean allowPartialSearchResults, + Boolean allowPartialSequenceResults, + Boolean expectShardFailures ) { - this(TEST_NANOS_INDEX, query, name, eventIds, joinKeys, size, maxSamplesPerKey); + this( + TEST_NANOS_INDEX, + query, + name, + eventIds, + joinKeys, + size, + maxSamplesPerKey, + allowPartialSearchResults, + allowPartialSequenceResults, + expectShardFailures + ); } // constructor for multi-cluster tests @@ -40,9 +54,23 @@ public EqlDateNanosSpecTestCase( List eventIds, String[] joinKeys, Integer size, - Integer maxSamplesPerKey + Integer maxSamplesPerKey, + Boolean 
allowPartialSearchResults, + Boolean allowPartialSequenceResults, + Boolean expectShardFailures ) { - super(index, query, name, eventIds, joinKeys, size, maxSamplesPerKey); + super( + index, + query, + name, + eventIds, + joinKeys, + size, + maxSamplesPerKey, + allowPartialSearchResults, + allowPartialSequenceResults, + expectShardFailures + ); } @Override diff --git a/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/EqlExtraSpecTestCase.java b/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/EqlExtraSpecTestCase.java index 292fe6c895cee..cc858ded25f37 100644 --- a/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/EqlExtraSpecTestCase.java +++ b/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/EqlExtraSpecTestCase.java @@ -27,9 +27,23 @@ public EqlExtraSpecTestCase( List eventIds, String[] joinKeys, Integer size, - Integer maxSamplesPerKey + Integer maxSamplesPerKey, + Boolean allowPartialSearchResults, + Boolean allowPartialSequenceResults, + Boolean expectShardFailures ) { - this(TEST_EXTRA_INDEX, query, name, eventIds, joinKeys, size, maxSamplesPerKey); + this( + TEST_EXTRA_INDEX, + query, + name, + eventIds, + joinKeys, + size, + maxSamplesPerKey, + allowPartialSearchResults, + allowPartialSequenceResults, + expectShardFailures + ); } // constructor for multi-cluster tests @@ -40,9 +54,23 @@ public EqlExtraSpecTestCase( List eventIds, String[] joinKeys, Integer size, - Integer maxSamplesPerKey + Integer maxSamplesPerKey, + Boolean allowPartialSearchResults, + Boolean allowPartialSequenceResults, + Boolean expectShardFailures ) { - super(index, query, name, eventIds, joinKeys, size, maxSamplesPerKey); + super( + index, + query, + name, + eventIds, + joinKeys, + size, + maxSamplesPerKey, + allowPartialSearchResults, + allowPartialSequenceResults, + expectShardFailures + ); } @Override diff --git a/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/EqlMissingEventsSpecTestCase.java b/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/EqlMissingEventsSpecTestCase.java index cdda9e9e068f5..f62c2b29101db 100644 --- a/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/EqlMissingEventsSpecTestCase.java +++ b/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/EqlMissingEventsSpecTestCase.java @@ -27,9 +27,23 @@ public EqlMissingEventsSpecTestCase( List eventIds, String[] joinKeys, Integer size, - Integer maxSamplesPerKey + Integer maxSamplesPerKey, + Boolean allowPartialSearchResults, + Boolean allowPartialSequenceResults, + Boolean expectShardFailures ) { - this(TEST_MISSING_EVENTS_INDEX, query, name, eventIds, joinKeys, size, maxSamplesPerKey); + this( + TEST_MISSING_EVENTS_INDEX, + query, + name, + eventIds, + joinKeys, + size, + maxSamplesPerKey, + allowPartialSearchResults, + allowPartialSequenceResults, + expectShardFailures + ); } // constructor for multi-cluster tests @@ -40,9 +54,23 @@ public EqlMissingEventsSpecTestCase( List eventIds, String[] joinKeys, Integer size, - Integer maxSamplesPerKey + Integer maxSamplesPerKey, + Boolean allowPartialSearchResults, + Boolean allowPartialSequenceResults, + Boolean expectShardFailures ) { - super(index, query, name, eventIds, joinKeys, size, maxSamplesPerKey); + super( + index, + query, + name, + eventIds, + joinKeys, + size, + maxSamplesPerKey, + allowPartialSearchResults, + allowPartialSequenceResults, + expectShardFailures + ); } @Override diff --git 
a/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/EqlSampleMultipleEntriesTestCase.java b/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/EqlSampleMultipleEntriesTestCase.java index 6471e264a92fa..a38ccacb42f5f 100644 --- a/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/EqlSampleMultipleEntriesTestCase.java +++ b/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/EqlSampleMultipleEntriesTestCase.java @@ -21,9 +21,23 @@ public EqlSampleMultipleEntriesTestCase( List eventIds, String[] joinKeys, Integer size, - Integer maxSamplesPerKey + Integer maxSamplesPerKey, + Boolean allowPartialSearchResults, + Boolean allowPartialSequenceResults, + Boolean expectShardFailures ) { - this(TEST_SAMPLE_MULTI, query, name, eventIds, joinKeys, size, maxSamplesPerKey); + this( + TEST_SAMPLE_MULTI, + query, + name, + eventIds, + joinKeys, + size, + maxSamplesPerKey, + allowPartialSearchResults, + allowPartialSequenceResults, + expectShardFailures + ); } public EqlSampleMultipleEntriesTestCase( @@ -33,9 +47,23 @@ public EqlSampleMultipleEntriesTestCase( List eventIds, String[] joinKeys, Integer size, - Integer maxSamplesPerKey + Integer maxSamplesPerKey, + Boolean allowPartialSearchResults, + Boolean allowPartialSequenceResults, + Boolean expectShardFailures ) { - super(index, query, name, eventIds, joinKeys, size, maxSamplesPerKey); + super( + index, + query, + name, + eventIds, + joinKeys, + size, + maxSamplesPerKey, + allowPartialSearchResults, + allowPartialSequenceResults, + expectShardFailures + ); } @ParametersFactory(shuffle = false, argumentFormatting = PARAM_FORMATTING) diff --git a/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/EqlSampleTestCase.java b/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/EqlSampleTestCase.java index dfae73b3602a7..4748bd0e3307b 100644 --- a/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/EqlSampleTestCase.java +++ b/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/EqlSampleTestCase.java @@ -15,8 +15,29 @@ public abstract class EqlSampleTestCase extends BaseEqlSpecTestCase { - public EqlSampleTestCase(String query, String name, List eventIds, String[] joinKeys, Integer size, Integer maxSamplesPerKey) { - this(TEST_SAMPLE, query, name, eventIds, joinKeys, size, maxSamplesPerKey); + public EqlSampleTestCase( + String query, + String name, + List eventIds, + String[] joinKeys, + Integer size, + Integer maxSamplesPerKey, + Boolean allowPartialSearchResults, + Boolean allowPartialSequenceResults, + Boolean expectShardFailures + ) { + this( + TEST_SAMPLE, + query, + name, + eventIds, + joinKeys, + size, + maxSamplesPerKey, + allowPartialSearchResults, + allowPartialSequenceResults, + expectShardFailures + ); } public EqlSampleTestCase( @@ -26,9 +47,23 @@ public EqlSampleTestCase( List eventIds, String[] joinKeys, Integer size, - Integer maxSamplesPerKey + Integer maxSamplesPerKey, + Boolean allowPartialSearchResults, + Boolean allowPartialSequenceResults, + Boolean expectShardFailures ) { - super(index, query, name, eventIds, joinKeys, size, maxSamplesPerKey); + super( + index, + query, + name, + eventIds, + joinKeys, + size, + maxSamplesPerKey, + allowPartialSearchResults, + allowPartialSequenceResults, + expectShardFailures + ); } @ParametersFactory(shuffle = false, argumentFormatting = PARAM_FORMATTING) diff --git a/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/EqlSpec.java 
b/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/EqlSpec.java index db7ee05ff2239..4dd617bac0abd 100644 --- a/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/EqlSpec.java +++ b/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/EqlSpec.java @@ -30,6 +30,9 @@ public class EqlSpec { private Integer size; private Integer maxSamplesPerKey; + private Boolean allowPartialSearchResults; + private Boolean allowPartialSequenceResults; + private Boolean expectShardFailures; public String name() { return name; @@ -103,6 +106,30 @@ public void maxSamplesPerKey(Integer maxSamplesPerKey) { this.maxSamplesPerKey = maxSamplesPerKey; } + public Boolean allowPartialSearchResults() { + return allowPartialSearchResults; + } + + public void allowPartialSearchResults(Boolean allowPartialSearchResults) { + this.allowPartialSearchResults = allowPartialSearchResults; + } + + public Boolean allowPartialSequenceResults() { + return allowPartialSequenceResults; + } + + public void allowPartialSequenceResults(Boolean allowPartialSequenceResults) { + this.allowPartialSequenceResults = allowPartialSequenceResults; + } + + public Boolean expectShardFailures() { + return expectShardFailures; + } + + public void expectShardFailures(Boolean expectShardFailures) { + this.expectShardFailures = expectShardFailures; + } + @Override public String toString() { String str = ""; @@ -132,7 +159,15 @@ public String toString() { if (maxSamplesPerKey != null) { str = appendWithComma(str, "max_samples_per_key", "" + maxSamplesPerKey); } - + if (allowPartialSearchResults != null) { + str = appendWithComma(str, "allow_partial_search_results", String.valueOf(allowPartialSearchResults)); + } + if (allowPartialSequenceResults != null) { + str = appendWithComma(str, "allow_partial_sequence_results", String.valueOf(allowPartialSequenceResults)); + } + if (expectShardFailures != null) { + str = appendWithComma(str, "expect_shard_failures", String.valueOf(expectShardFailures)); + } return str; } @@ -150,12 +185,22 @@ public boolean equals(Object other) { return Objects.equals(this.query(), that.query()) && Objects.equals(size, that.size) - && Objects.equals(maxSamplesPerKey, that.maxSamplesPerKey); + && Objects.equals(maxSamplesPerKey, that.maxSamplesPerKey) + && Objects.equals(allowPartialSearchResults, that.allowPartialSearchResults) + && Objects.equals(allowPartialSequenceResults, that.allowPartialSequenceResults) + && Objects.equals(expectShardFailures, that.expectShardFailures); } @Override public int hashCode() { - return Objects.hash(this.query, size, maxSamplesPerKey); + return Objects.hash( + this.query, + size, + maxSamplesPerKey, + allowPartialSearchResults, + allowPartialSequenceResults, + expectShardFailures + ); } private static String appendWithComma(String str, String name, String append) { diff --git a/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/EqlSpecFailingShardsTestCase.java b/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/EqlSpecFailingShardsTestCase.java new file mode 100644 index 0000000000000..c490a2f703dcc --- /dev/null +++ b/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/EqlSpecFailingShardsTestCase.java @@ -0,0 +1,83 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.test.eql; + +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import java.util.List; + +import static org.elasticsearch.test.eql.DataLoader.TEST_INDEX; +import static org.elasticsearch.test.eql.DataLoader.TEST_SHARD_FAILURES_INDEX; + +public abstract class EqlSpecFailingShardsTestCase extends BaseEqlSpecTestCase { + + @ParametersFactory(shuffle = false, argumentFormatting = PARAM_FORMATTING) + public static List readTestSpecs() throws Exception { + + // Load EQL validation specs + return asArray(EqlSpecLoader.load("/test_failing_shards.toml")); + } + + @Override + protected String tiebreaker() { + return "serial_event_id"; + } + + // constructor for "local" rest tests + public EqlSpecFailingShardsTestCase( + String query, + String name, + List eventIds, + String[] joinKeys, + Integer size, + Integer maxSamplesPerKey, + Boolean allowPartialSearchResults, + Boolean allowPartialSequenceResults, + Boolean expectShardFailures + ) { + this( + TEST_INDEX + "," + TEST_SHARD_FAILURES_INDEX, + query, + name, + eventIds, + joinKeys, + size, + maxSamplesPerKey, + allowPartialSearchResults, + allowPartialSequenceResults, + expectShardFailures + ); + } + + // constructor for multi-cluster tests + public EqlSpecFailingShardsTestCase( + String index, + String query, + String name, + List eventIds, + String[] joinKeys, + Integer size, + Integer maxSamplesPerKey, + Boolean allowPartialSearch, + Boolean allowPartialSequenceResults, + Boolean expectShardFailures + ) { + super( + index, + query, + name, + eventIds, + joinKeys, + size, + maxSamplesPerKey, + allowPartialSearch, + allowPartialSequenceResults, + expectShardFailures + ); + } +} diff --git a/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/EqlSpecLoader.java b/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/EqlSpecLoader.java index a1f555563e29c..f86107cf3bac5 100644 --- a/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/EqlSpecLoader.java +++ b/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/EqlSpecLoader.java @@ -76,6 +76,10 @@ private static Integer getInteger(TomlTable table, String key) { return null; } + private static Boolean getBoolean(TomlTable table, String key) { + return table.getBoolean(key); + } + private static List readFromStream(InputStream is, Set uniqueTestNames) throws Exception { List testSpecs = new ArrayList<>(); @@ -90,6 +94,9 @@ private static List readFromStream(InputStream is, Set uniqueTe spec.note(getTrimmedString(table, "note")); spec.description(getTrimmedString(table, "description")); spec.size(getInteger(table, "size")); + spec.allowPartialSearchResults(getBoolean(table, "allow_partial_search_results")); + spec.allowPartialSequenceResults(getBoolean(table, "allow_partial_sequence_results")); + spec.expectShardFailures(getBoolean(table, "expect_shard_failures")); List arr = table.getList("tags"); if (arr != null) { diff --git a/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/EqlSpecTestCase.java b/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/EqlSpecTestCase.java index 7113924f79029..62a3ea72fe51f 100644 --- a/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/EqlSpecTestCase.java +++ b/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/EqlSpecTestCase.java @@ -28,8 +28,29 @@ protected String tiebreaker() { } // constructor for "local" rest tests - public EqlSpecTestCase(String query, String name, 
List eventIds, String[] joinKeys, Integer size, Integer maxSamplesPerKey) { - this(TEST_INDEX, query, name, eventIds, joinKeys, size, maxSamplesPerKey); + public EqlSpecTestCase( + String query, + String name, + List eventIds, + String[] joinKeys, + Integer size, + Integer maxSamplesPerKey, + Boolean allowPartialSearch, + Boolean allowPartialSequenceResults, + Boolean expectShardFailures + ) { + this( + TEST_INDEX, + query, + name, + eventIds, + joinKeys, + size, + maxSamplesPerKey, + allowPartialSearch, + allowPartialSequenceResults, + expectShardFailures + ); } // constructor for multi-cluster tests @@ -40,8 +61,22 @@ public EqlSpecTestCase( List eventIds, String[] joinKeys, Integer size, - Integer maxSamplesPerKey + Integer maxSamplesPerKey, + Boolean allowPartialSearch, + Boolean allowPartialSequenceResults, + Boolean expectShardFailures ) { - super(index, query, name, eventIds, joinKeys, size, maxSamplesPerKey); + super( + index, + query, + name, + eventIds, + joinKeys, + size, + maxSamplesPerKey, + allowPartialSearch, + allowPartialSequenceResults, + expectShardFailures + ); } } diff --git a/x-pack/plugin/eql/qa/common/src/main/resources/data/endgame-shard-failures.data b/x-pack/plugin/eql/qa/common/src/main/resources/data/endgame-shard-failures.data new file mode 100644 index 0000000000000..18a1d05656d09 --- /dev/null +++ b/x-pack/plugin/eql/qa/common/src/main/resources/data/endgame-shard-failures.data @@ -0,0 +1,14 @@ +[ + { + "event_subtype_full": "already_running", + "event_type": "process", + "event_type_full": "process_event", + "opcode": 3, + "pid": 0, + "process_name": "System Idle Process", + "serial_event_id": 10000, + "subtype": "create", + "timestamp": 117444736000000000, + "unique_pid": 1 + } +] diff --git a/x-pack/plugin/eql/qa/common/src/main/resources/data/endgame-shard-failures.mapping b/x-pack/plugin/eql/qa/common/src/main/resources/data/endgame-shard-failures.mapping new file mode 100644 index 0000000000000..3b5039f4098af --- /dev/null +++ b/x-pack/plugin/eql/qa/common/src/main/resources/data/endgame-shard-failures.mapping @@ -0,0 +1,105 @@ +# Text patterns like "[runtime_random_keyword_type]" will get replaced at runtime with a random string type. +# See DataLoader class for pattern replacements. 
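# Note: the "broken" runtime field below is deliberately unrunnable. Its painless
# script reads doc['non_existing'] and emits a String even though the field is
# declared as "long", so any query that evaluates it fails at search time and marks
# the shard as failed, which is what the specs in test_failing_shards.toml rely on.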
+{ + "runtime":{ + "broken":{ + "type": "long", + "script": { + "lang": "painless", + "source": "emit(doc['non_existing'].value.dayOfWeekEnum.getDisplayName(TextStyle.FULL, Locale.ENGLISH))" + } + } + }, + "properties" : { + "command_line" : { + "type" : "[runtime_random_keyword_type]" + }, + "event_type" : { + "type" : "[runtime_random_keyword_type]" + }, + "event" : { + "properties" : { + "category" : { + "type" : "alias", + "path" : "event_type" + }, + "sequence" : { + "type" : "alias", + "path" : "serial_event_id" + } + } + }, + "md5" : { + "type" : "[runtime_random_keyword_type]" + }, + "parent_process_name": { + "type" : "[runtime_random_keyword_type]" + }, + "parent_process_path": { + "type" : "[runtime_random_keyword_type]" + }, + "pid" : { + "type" : "long" + }, + "ppid" : { + "type" : "long" + }, + "process_name": { + "type" : "[runtime_random_keyword_type]" + }, + "process_path": { + "type" : "[runtime_random_keyword_type]" + }, + "subtype" : { + "type" : "[runtime_random_keyword_type]" + }, + "timestamp" : { + "type" : "date" + }, + "@timestamp" : { + "type" : "date" + }, + "user" : { + "type" : "[runtime_random_keyword_type]" + }, + "user_name" : { + "type" : "[runtime_random_keyword_type]" + }, + "user_domain": { + "type" : "[runtime_random_keyword_type]" + }, + "hostname" : { + "type" : "text", + "fields" : { + "[runtime_random_keyword_type]" : { + "type" : "[runtime_random_keyword_type]", + "ignore_above" : 256 + } + } + }, + "opcode" : { + "type" : "long" + }, + "file_name" : { + "type" : "text", + "fields" : { + "[runtime_random_keyword_type]" : { + "type" : "[runtime_random_keyword_type]", + "ignore_above" : 256 + } + } + }, + "file_path" : { + "type" : "[runtime_random_keyword_type]" + }, + "serial_event_id" : { + "type" : "long" + }, + "source_address" : { + "type" : "ip" + }, + "exit_code" : { + "type" : "long" + } + } +} diff --git a/x-pack/plugin/eql/qa/common/src/main/resources/test_failing_shards.toml b/x-pack/plugin/eql/qa/common/src/main/resources/test_failing_shards.toml new file mode 100644 index 0000000000000..a551c66fd48bd --- /dev/null +++ b/x-pack/plugin/eql/qa/common/src/main/resources/test_failing_shards.toml @@ -0,0 +1,173 @@ +# this query doesn't touch the "broken" field, so it should not fail +[[queries]] +name = "eventQueryNoShardFailures" +query = 'process where serial_event_id == 1' +allow_partial_search_results = true +expected_event_ids = [1] +expect_shard_failures = false + + +[[queries]] +name = "eventQueryShardFailures" +query = 'process where serial_event_id == 1 or broken == 1' +allow_partial_search_results = true +expected_event_ids = [1] +expect_shard_failures = true + + +[[queries]] +name = "eventQueryShardFailuresOptionalField" +query = 'process where serial_event_id == 1 and ?optional_field_default_null == null or broken == 1' +allow_partial_search_results = true +expected_event_ids = [1] +expect_shard_failures = true + + +[[queries]] +name = "eventQueryShardFailuresOptionalFieldMatching" +query = 'process where serial_event_id == 2 and ?subtype == "create" or broken == 1' +allow_partial_search_results = true +expected_event_ids = [2] +expect_shard_failures = true + + +# this query doesn't touch the "broken" field, so it should not fail +[[queries]] +name = "sequenceQueryNoShardFailures" +query = ''' +sequence + [process where serial_event_id == 1] + [process where serial_event_id == 2] +''' +expected_event_ids = [1, 2] +expect_shard_failures = false + + +# this query doesn't touch the "broken" field, so it should not fail +[[queries]] 
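# same sequence as above, but with partial results explicitly disallowed; since no
# shard fails here, the outcome must be identical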
+name = "sequenceQueryNoShardFailuresAllowFalse" +query = ''' +sequence + [process where serial_event_id == 1] + [process where serial_event_id == 2] +''' +allow_partial_search_results = false +expected_event_ids = [1, 2] +expect_shard_failures = false + + +# this query doesn't touch the "broken" field, so it should not fail +[[queries]] +name = "sequenceQueryNoShardFailuresAllowTrue" +query = ''' +sequence + [process where serial_event_id == 1] + [process where serial_event_id == 2] +''' +allow_partial_search_results = true +expected_event_ids = [1, 2] +expect_shard_failures = false + + +[[queries]] +name = "sequenceQueryMissingShards" +query = ''' +sequence + [process where serial_event_id == 1 or broken == 1] + [process where serial_event_id == 2] +''' +allow_partial_search_results = true +expected_event_ids = [] +expect_shard_failures = true + + +[[queries]] +name = "sequenceQueryMissingShardsPartialResults" +query = ''' +sequence + [process where serial_event_id == 1 or broken == 1] + [process where serial_event_id == 2] +''' +allow_partial_search_results = true +allow_partial_sequence_results = true +expected_event_ids = [1, 2] +expect_shard_failures = true + + +[[queries]] +name = "sequenceQueryMissingShardsPartialResultsOptional" +query = ''' +sequence + [process where ?serial_event_id == 1 or broken == 1] + [process where serial_event_id == 2] +''' +allow_partial_search_results = true +allow_partial_sequence_results = true +expected_event_ids = [1, 2] +expect_shard_failures = true + + +[[queries]] +name = "sequenceQueryMissingShardsPartialResultsOptional2" +query = ''' +sequence with maxspan=100000d + [process where serial_event_id == 1 and ?subtype == "create" or broken == 1] + [process where serial_event_id == 2] +''' +allow_partial_search_results = true +allow_partial_sequence_results = true +expected_event_ids = [1, 2] +expect_shard_failures = true + + +[[queries]] +name = "sequenceQueryMissingShardsPartialResultsOptionalMissing" +query = ''' +sequence with maxspan=100000d + [process where serial_event_id == 1 and ?subtype == "create"] + ![process where broken == 1] + [process where serial_event_id == 2] +''' +allow_partial_search_results = true +allow_partial_sequence_results = true +expected_event_ids = [1, -1, 2] +expect_shard_failures = true + + +[[queries]] +name = "sequenceQueryMissingShardsPartialResultsOptionalMissing2" +query = ''' +sequence with maxspan=100000d + [process where serial_event_id == 1 and ?subtype == "create" or broken == 1] + ![process where broken == 1] + [process where serial_event_id == 2] +''' +allow_partial_search_results = true +allow_partial_sequence_results = true +expected_event_ids = [1, -1, 2] +expect_shard_failures = true + + +[[queries]] +name = "sampleQueryMissingShardsPartialResults" +query = ''' +sample by event_subtype_full + [process where serial_event_id == 1 or broken == 1] + [process where serial_event_id == 2] +''' +allow_partial_search_results = true +expected_event_ids = [1, 2] +expect_shard_failures = true + + +[[queries]] +name = "sampleQueryMissingShardsPartialResultsOptional" +query = ''' +sample by event_subtype_full + [process where serial_event_id == 1 and ?subtype == "create" or broken == 1] + [process where serial_event_id == 2] +''' +allow_partial_search_results = true +expected_event_ids = [1, 2] +expect_shard_failures = true + diff --git a/x-pack/plugin/eql/qa/mixed-node/src/javaRestTest/java/org/elasticsearch/xpack/eql/qa/mixed_node/EqlSearchIT.java 
b/x-pack/plugin/eql/qa/mixed-node/src/javaRestTest/java/org/elasticsearch/xpack/eql/qa/mixed_node/EqlSearchIT.java index 2a29572374fa8..60c7fb1c7ad25 100644 --- a/x-pack/plugin/eql/qa/mixed-node/src/javaRestTest/java/org/elasticsearch/xpack/eql/qa/mixed_node/EqlSearchIT.java +++ b/x-pack/plugin/eql/qa/mixed-node/src/javaRestTest/java/org/elasticsearch/xpack/eql/qa/mixed_node/EqlSearchIT.java @@ -407,7 +407,16 @@ private void assertMultiValueFunctionQuery( for (int id : ids) { eventIds.add(String.valueOf(id)); } - request.setJsonEntity("{\"query\":\"" + query + "\"}"); + + StringBuilder payload = new StringBuilder("{\"query\":\"" + query + "\""); + if (randomBoolean()) { + payload.append(", \"allow_partial_search_results\": true"); + } + if (randomBoolean()) { + payload.append(", \"allow_partial_sequence_results\": true"); + } + payload.append("}"); + request.setJsonEntity(payload.toString()); assertResponse(query, eventIds, runEql(client, request)); testedFunctions.add(functionName); } diff --git a/x-pack/plugin/eql/qa/multi-cluster-with-security/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlDateNanosIT.java b/x-pack/plugin/eql/qa/multi-cluster-with-security/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlDateNanosIT.java index c20968871472f..5d6824232d80f 100644 --- a/x-pack/plugin/eql/qa/multi-cluster-with-security/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlDateNanosIT.java +++ b/x-pack/plugin/eql/qa/multi-cluster-with-security/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlDateNanosIT.java @@ -37,7 +37,28 @@ protected String getRemoteCluster() { return REMOTE_CLUSTER.getHttpAddresses(); } - public EqlDateNanosIT(String query, String name, List eventIds, String[] joinKeys, Integer size, Integer maxSamplesPerKey) { - super(remoteClusterIndex(TEST_NANOS_INDEX), query, name, eventIds, joinKeys, size, maxSamplesPerKey); + public EqlDateNanosIT( + String query, + String name, + List eventIds, + String[] joinKeys, + Integer size, + Integer maxSamplesPerKey, + Boolean allowPartialSearchResults, + Boolean allowPartialSequenceResults, + Boolean expectShardFailures + ) { + super( + remoteClusterIndex(TEST_NANOS_INDEX), + query, + name, + eventIds, + joinKeys, + size, + maxSamplesPerKey, + allowPartialSearchResults, + allowPartialSequenceResults, + expectShardFailures + ); } } diff --git a/x-pack/plugin/eql/qa/multi-cluster-with-security/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlExtraIT.java b/x-pack/plugin/eql/qa/multi-cluster-with-security/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlExtraIT.java index 774c19d02adf0..79b095434814b 100644 --- a/x-pack/plugin/eql/qa/multi-cluster-with-security/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlExtraIT.java +++ b/x-pack/plugin/eql/qa/multi-cluster-with-security/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlExtraIT.java @@ -37,7 +37,28 @@ protected String getRemoteCluster() { return REMOTE_CLUSTER.getHttpAddresses(); } - public EqlExtraIT(String query, String name, List eventIds, String[] joinKeys, Integer size, Integer maxSamplesPerKey) { - super(remoteClusterIndex(TEST_EXTRA_INDEX), query, name, eventIds, joinKeys, size, maxSamplesPerKey); + public EqlExtraIT( + String query, + String name, + List eventIds, + String[] joinKeys, + Integer size, + Integer maxSamplesPerKey, + Boolean allowPartialSearchResults, + Boolean allowPartialSequenceResults, + Boolean expectShardFailures + ) { + super( + remoteClusterIndex(TEST_EXTRA_INDEX), + query, + name, + eventIds, + joinKeys, + size, + 
maxSamplesPerKey, + allowPartialSearchResults, + allowPartialSequenceResults, + expectShardFailures + ); } } diff --git a/x-pack/plugin/eql/qa/multi-cluster-with-security/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlSampleIT.java b/x-pack/plugin/eql/qa/multi-cluster-with-security/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlSampleIT.java index 1502c250bd058..7673eec32ec55 100644 --- a/x-pack/plugin/eql/qa/multi-cluster-with-security/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlSampleIT.java +++ b/x-pack/plugin/eql/qa/multi-cluster-with-security/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlSampleIT.java @@ -37,7 +37,28 @@ protected String getRemoteCluster() { return REMOTE_CLUSTER.getHttpAddresses(); } - public EqlSampleIT(String query, String name, List eventIds, String[] joinKeys, Integer size, Integer maxSamplesPerKey) { - super(remoteClusterPattern(TEST_SAMPLE), query, name, eventIds, joinKeys, size, maxSamplesPerKey); + public EqlSampleIT( + String query, + String name, + List eventIds, + String[] joinKeys, + Integer size, + Integer maxSamplesPerKey, + Boolean allowPartialSearchResults, + Boolean allowPartialSequenceResults, + Boolean expectShardFailures + ) { + super( + remoteClusterPattern(TEST_SAMPLE), + query, + name, + eventIds, + joinKeys, + size, + maxSamplesPerKey, + allowPartialSearchResults, + allowPartialSequenceResults, + expectShardFailures + ); } } diff --git a/x-pack/plugin/eql/qa/multi-cluster-with-security/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlSampleMultipleEntriesIT.java b/x-pack/plugin/eql/qa/multi-cluster-with-security/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlSampleMultipleEntriesIT.java index 795fe4e103a31..ac6f7fe508c99 100644 --- a/x-pack/plugin/eql/qa/multi-cluster-with-security/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlSampleMultipleEntriesIT.java +++ b/x-pack/plugin/eql/qa/multi-cluster-with-security/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlSampleMultipleEntriesIT.java @@ -43,8 +43,22 @@ public EqlSampleMultipleEntriesIT( List eventIds, String[] joinKeys, Integer size, - Integer maxSamplesPerKey + Integer maxSamplesPerKey, + Boolean allowPartialSearchResults, + Boolean allowPartialSequenceResults, + Boolean expectShardFailures ) { - super(remoteClusterPattern(TEST_SAMPLE_MULTI), query, name, eventIds, joinKeys, size, maxSamplesPerKey); + super( + remoteClusterPattern(TEST_SAMPLE_MULTI), + query, + name, + eventIds, + joinKeys, + size, + maxSamplesPerKey, + allowPartialSearchResults, + allowPartialSequenceResults, + expectShardFailures + ); } } diff --git a/x-pack/plugin/eql/qa/multi-cluster-with-security/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlSpecIT.java b/x-pack/plugin/eql/qa/multi-cluster-with-security/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlSpecIT.java index 2cddecb644a1a..db0c03e8fdb6f 100644 --- a/x-pack/plugin/eql/qa/multi-cluster-with-security/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlSpecIT.java +++ b/x-pack/plugin/eql/qa/multi-cluster-with-security/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlSpecIT.java @@ -37,7 +37,28 @@ protected String getRemoteCluster() { return REMOTE_CLUSTER.getHttpAddresses(); } - public EqlSpecIT(String query, String name, List eventIds, String[] joinKeys, Integer size, Integer maxSamplesPerKey) { - super(remoteClusterIndex(TEST_INDEX), query, name, eventIds, joinKeys, size, maxSamplesPerKey); + public EqlSpecIT( + String query, + String name, + List eventIds, + String[] joinKeys, + Integer size, + 
Integer maxSamplesPerKey, + Boolean allowPartialSearchResults, + Boolean allowPartialSequenceResults, + Boolean expectShardFailures + ) { + super( + remoteClusterIndex(TEST_INDEX), + query, + name, + eventIds, + joinKeys, + size, + maxSamplesPerKey, + allowPartialSearchResults, + allowPartialSequenceResults, + expectShardFailures + ); } } diff --git a/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlDateNanosIT.java b/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlDateNanosIT.java index 1df10fde7fde5..5e1fa224de58d 100644 --- a/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlDateNanosIT.java +++ b/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlDateNanosIT.java @@ -27,7 +27,27 @@ protected String getTestRestCluster() { return cluster.getHttpAddresses(); } - public EqlDateNanosIT(String query, String name, List eventIds, String[] joinKeys, Integer size, Integer maxSamplesPerKey) { - super(query, name, eventIds, joinKeys, size, maxSamplesPerKey); + public EqlDateNanosIT( + String query, + String name, + List eventIds, + String[] joinKeys, + Integer size, + Integer maxSamplesPerKey, + Boolean allowPartialSearchResults, + Boolean allowPartialSequenceResults, + Boolean expectShardFailures + ) { + super( + query, + name, + eventIds, + joinKeys, + size, + maxSamplesPerKey, + allowPartialSearchResults, + allowPartialSequenceResults, + expectShardFailures + ); } } diff --git a/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlExtraIT.java b/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlExtraIT.java index 8af8fcac087b5..cb92eddeb0410 100644 --- a/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlExtraIT.java +++ b/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlExtraIT.java @@ -27,7 +27,27 @@ protected String getTestRestCluster() { return cluster.getHttpAddresses(); } - public EqlExtraIT(String query, String name, List eventIds, String[] joinKeys, Integer size, Integer maxSamplesPerKey) { - super(query, name, eventIds, joinKeys, size, maxSamplesPerKey); + public EqlExtraIT( + String query, + String name, + List eventIds, + String[] joinKeys, + Integer size, + Integer maxSamplesPerKey, + Boolean allowPartialSearchResults, + Boolean allowPartialSequenceResults, + Boolean expectShardFailures + ) { + super( + query, + name, + eventIds, + joinKeys, + size, + maxSamplesPerKey, + allowPartialSearchResults, + allowPartialSequenceResults, + expectShardFailures + ); } } diff --git a/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlMissingEventsIT.java b/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlMissingEventsIT.java index 05557fb4883b3..4f1faf3322e7f 100644 --- a/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlMissingEventsIT.java +++ b/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlMissingEventsIT.java @@ -27,8 +27,28 @@ protected String getTestRestCluster() { return cluster.getHttpAddresses(); } - public EqlMissingEventsIT(String query, String name, List eventIds, String[] joinKeys, Integer size, Integer maxSamplesPerKey) { - super(query, name, eventIds, joinKeys, size, maxSamplesPerKey); + public EqlMissingEventsIT( + String query, + String name, + List eventIds, + String[] joinKeys, + Integer size, + Integer maxSamplesPerKey, + Boolean 
allowPartialSearchResults, + Boolean allowPartialSequenceResults, + Boolean expectShardFailures + ) { + super( + query, + name, + eventIds, + joinKeys, + size, + maxSamplesPerKey, + allowPartialSearchResults, + allowPartialSequenceResults, + expectShardFailures + ); } } diff --git a/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlSampleIT.java b/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlSampleIT.java index dc2c653fad89e..c0bce3ffc9e4f 100644 --- a/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlSampleIT.java +++ b/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlSampleIT.java @@ -27,8 +27,28 @@ protected String getTestRestCluster() { return cluster.getHttpAddresses(); } - public EqlSampleIT(String query, String name, List eventIds, String[] joinKeys, Integer size, Integer maxSamplesPerKey) { - super(query, name, eventIds, joinKeys, size, maxSamplesPerKey); + public EqlSampleIT( + String query, + String name, + List eventIds, + String[] joinKeys, + Integer size, + Integer maxSamplesPerKey, + Boolean allowPartialSearchResults, + Boolean allowPartialSequenceResults, + Boolean expectShardFailures + ) { + super( + query, + name, + eventIds, + joinKeys, + size, + maxSamplesPerKey, + allowPartialSearchResults, + allowPartialSequenceResults, + expectShardFailures + ); } } diff --git a/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlSampleMultipleEntriesIT.java b/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlSampleMultipleEntriesIT.java index af1ade9120bbd..f50ee36095ae0 100644 --- a/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlSampleMultipleEntriesIT.java +++ b/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlSampleMultipleEntriesIT.java @@ -33,9 +33,22 @@ public EqlSampleMultipleEntriesIT( List eventIds, String[] joinKeys, Integer size, - Integer maxSamplesPerKey + Integer maxSamplesPerKey, + Boolean allowPartialSearchResults, + Boolean allowPartialSequenceResults, + Boolean expectShardFailures ) { - super(query, name, eventIds, joinKeys, size, maxSamplesPerKey); + super( + query, + name, + eventIds, + joinKeys, + size, + maxSamplesPerKey, + allowPartialSearchResults, + allowPartialSequenceResults, + expectShardFailures + ); } } diff --git a/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlSpecFailingShardsIT.java b/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlSpecFailingShardsIT.java new file mode 100644 index 0000000000000..cf05811a77857 --- /dev/null +++ b/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlSpecFailingShardsIT.java @@ -0,0 +1,53 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.eql; + +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; + +import org.elasticsearch.test.TestClustersThreadFilter; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.eql.EqlSpecFailingShardsTestCase; +import org.junit.ClassRule; + +import java.util.List; + +@ThreadLeakFilters(filters = TestClustersThreadFilter.class) +public class EqlSpecFailingShardsIT extends EqlSpecFailingShardsTestCase { + + @ClassRule + public static final ElasticsearchCluster cluster = EqlTestCluster.CLUSTER; + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + + public EqlSpecFailingShardsIT( + String query, + String name, + List eventIds, + String[] joinKeys, + Integer size, + Integer maxSamplesPerKey, + Boolean allowPartialSearchResults, + Boolean allowPartialSequenceResults, + Boolean expectShardFailures + ) { + super( + query, + name, + eventIds, + joinKeys, + size, + maxSamplesPerKey, + allowPartialSearchResults, + allowPartialSequenceResults, + expectShardFailures + ); + } +} diff --git a/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlSpecIT.java b/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlSpecIT.java index 7aac0ae336c8a..0aad5cc1b73da 100644 --- a/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlSpecIT.java +++ b/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlSpecIT.java @@ -27,7 +27,27 @@ protected String getTestRestCluster() { return cluster.getHttpAddresses(); } - public EqlSpecIT(String query, String name, List eventIds, String[] joinKeys, Integer size, Integer maxSamplesPerKey) { - super(query, name, eventIds, joinKeys, size, maxSamplesPerKey); + public EqlSpecIT( + String query, + String name, + List eventIds, + String[] joinKeys, + Integer size, + Integer maxSamplesPerKey, + Boolean allowPartialSearchResults, + Boolean allowPartialSequenceResults, + Boolean expectShardFailures + ) { + super( + query, + name, + eventIds, + joinKeys, + size, + maxSamplesPerKey, + allowPartialSearchResults, + allowPartialSequenceResults, + expectShardFailures + ); } } diff --git a/x-pack/plugin/eql/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/eql/10_basic.yml b/x-pack/plugin/eql/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/eql/10_basic.yml index e49264d76d5e9..c7974f3b584b4 100644 --- a/x-pack/plugin/eql/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/eql/10_basic.yml +++ b/x-pack/plugin/eql/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/eql/10_basic.yml @@ -83,6 +83,34 @@ setup: id: 123 valid: true + - do: + indices.create: + index: eql_test_rebel + body: + mappings: + properties: + some_keyword: + type: keyword + runtime: + day_of_week: + type: keyword + script: + source: "throw new IllegalArgumentException(\"rebel shards\")" + - do: + bulk: + refresh: true + body: + - index: + _index: eql_test_rebel + _id: "1" + - event: + - category: process + "@timestamp": 2020-02-03T12:34:56Z + user: SYSTEM + id: 123 + valid: false + some_keyword: longer than normal + --- # Testing round-trip and the basic shape of the response "Execute some EQL.": @@ -478,3 +506,118 @@ setup: query: 'sequence with maxspan=10d [network where user == "ADMIN"] ![network where used == "SYSTEM"]' - match: { error.root_cause.0.type: "verification_exception" } - match: { error.root_cause.0.reason: "Found 1 problem\nline 1:75: Unknown column 
[used], did you mean [user]?" } + + +--- +"Execute query shard failures and with allow_partial_search_results": + - do: + eql.search: + index: eql_test* + body: + query: 'process where user == "SYSTEM" and day_of_week == "Monday"' + fields: [{"field":"@timestamp","format":"epoch_millis"},"id","valid","day_of_week"] + allow_partial_search_results: true + + - match: {timed_out: false} + - match: {hits.total.value: 1} + - match: {hits.total.relation: "eq"} + - match: {hits.events.0._source.user: "SYSTEM"} + - match: {hits.events.0._id: "1"} + - match: {hits.events.0.fields.@timestamp: ["1580733296000"]} + - match: {hits.events.0.fields.id: [123]} + - match: {hits.events.0.fields.valid: [false]} + - match: {hits.events.0.fields.day_of_week: ["Monday"]} + - match: {shard_failures.0.index: "eql_test_rebel"} + + +--- +"Execute query shard failures and with allow_partial_search_results as request param": + - do: + eql.search: + index: eql_test* + allow_partial_search_results: true + body: + query: 'process where user == "SYSTEM" and day_of_week == "Monday"' + fields: [{"field":"@timestamp","format":"epoch_millis"},"id","valid","day_of_week"] + + - match: {timed_out: false} + - match: {hits.total.value: 1} + - match: {hits.total.relation: "eq"} + - match: {hits.events.0._source.user: "SYSTEM"} + - match: {hits.events.0._id: "1"} + - match: {hits.events.0.fields.@timestamp: ["1580733296000"]} + - match: {hits.events.0.fields.id: [123]} + - match: {hits.events.0.fields.valid: [false]} + - match: {hits.events.0.fields.day_of_week: ["Monday"]} + - match: {shard_failures.0.index: "eql_test_rebel"} + + +--- +"Execute sequence with shard failures and allow_partial_search_results=true": + - do: + eql.search: + index: eql_test* + body: + query: 'sequence [process where user == "SYSTEM" and day_of_week == "Monday"] [process where user == "SYSTEM" and day_of_week == "Tuesday"]' + fields: [{"field":"@timestamp","format":"epoch_millis"},"id","valid","day_of_week"] + allow_partial_search_results: true + + - match: {timed_out: false} + - match: {hits.total.value: 0} + - match: {shard_failures.0.index: "eql_test_rebel"} + + +--- +"Execute sequence with shard failures, allow_partial_search_results=true and allow_partial_sequence_results=true": + - do: + eql.search: + index: eql_test* + body: + query: 'sequence [process where user == "SYSTEM" and day_of_week == "Monday"] [process where user == "SYSTEM" and day_of_week == "Tuesday"]' + fields: [{"field":"@timestamp","format":"epoch_millis"},"id","valid","day_of_week"] + allow_partial_search_results: true + allow_partial_sequence_results: true + + - match: {timed_out: false} + - match: {hits.total.value: 1} + - match: {hits.total.relation: "eq"} + - match: {hits.sequences.0.events.0._source.user: "SYSTEM"} + - match: {hits.sequences.0.events.0._id: "1"} + - match: {hits.sequences.0.events.0.fields.@timestamp: ["1580733296000"]} + - match: {hits.sequences.0.events.0.fields.id: [123]} + - match: {hits.sequences.0.events.0.fields.valid: [false]} + - match: {hits.sequences.0.events.0.fields.day_of_week: ["Monday"]} + - match: {hits.sequences.0.events.1._id: "2"} + - match: {hits.sequences.0.events.1.fields.@timestamp: ["1580819696000"]} + - match: {hits.sequences.0.events.1.fields.id: [123]} + - match: {hits.sequences.0.events.1.fields.valid: [true]} + - match: {hits.sequences.0.events.1.fields.day_of_week: ["Tuesday"]} + - match: {shard_failures.0.index: "eql_test_rebel"} + + +--- +"Execute sequence with shard failures, allow_partial_search_results=true and 
allow_partial_sequence_results=true as query params": + - do: + eql.search: + index: eql_test* + allow_partial_search_results: true + allow_partial_sequence_results: true + body: + query: 'sequence [process where user == "SYSTEM" and day_of_week == "Monday"] [process where user == "SYSTEM" and day_of_week == "Tuesday"]' + fields: [{"field":"@timestamp","format":"epoch_millis"},"id","valid","day_of_week"] + + - match: {timed_out: false} + - match: {hits.total.value: 1} + - match: {hits.total.relation: "eq"} + - match: {hits.sequences.0.events.0._source.user: "SYSTEM"} + - match: {hits.sequences.0.events.0._id: "1"} + - match: {hits.sequences.0.events.0.fields.@timestamp: ["1580733296000"]} + - match: {hits.sequences.0.events.0.fields.id: [123]} + - match: {hits.sequences.0.events.0.fields.valid: [false]} + - match: {hits.sequences.0.events.0.fields.day_of_week: ["Monday"]} + - match: {hits.sequences.0.events.1._id: "2"} + - match: {hits.sequences.0.events.1.fields.@timestamp: ["1580819696000"]} + - match: {hits.sequences.0.events.1.fields.id: [123]} + - match: {hits.sequences.0.events.1.fields.valid: [true]} + - match: {hits.sequences.0.events.1.fields.day_of_week: ["Tuesday"]} + - match: {shard_failures.0.index: "eql_test_rebel"} diff --git a/x-pack/plugin/eql/src/internalClusterTest/java/org/elasticsearch/xpack/eql/action/CCSPartialResultsIT.java b/x-pack/plugin/eql/src/internalClusterTest/java/org/elasticsearch/xpack/eql/action/CCSPartialResultsIT.java new file mode 100644 index 0000000000000..da6bb6180428b --- /dev/null +++ b/x-pack/plugin/eql/src/internalClusterTest/java/org/elasticsearch/xpack/eql/action/CCSPartialResultsIT.java @@ -0,0 +1,613 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.eql.action; + +import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsAction; +import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.AbstractMultiClustersTestCase; +import org.elasticsearch.xpack.eql.plugin.EqlPlugin; + +import java.io.IOException; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.ExecutionException; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; + +public class CCSPartialResultsIT extends AbstractMultiClustersTestCase { + + static String REMOTE_CLUSTER = "cluster_a"; + + protected Collection<Class<? extends Plugin>> nodePlugins(String cluster) { + return Collections.singletonList(LocalStateEQLXPackPlugin.class); + } + + protected final Client localClient() { + return client(LOCAL_CLUSTER); + } + + @Override + protected List<String> remoteClusterAlias() { + return List.of(REMOTE_CLUSTER); + } + + @Override + protected boolean reuseClusters() { + return false; + } + + /** + * + * @return remote node name + */ + private String createSchema() { + final Client remoteClient = client(REMOTE_CLUSTER); + final String remoteNode = cluster(REMOTE_CLUSTER).startDataOnlyNode(); + final String remoteNode2 = cluster(REMOTE_CLUSTER).startDataOnlyNode(); + + assertAcked( + remoteClient.admin() + .indices() + .prepareCreate("test-1-remote") + .setSettings( + Settings.builder() + .put("index.routing.allocation.require._name", remoteNode) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .build() + ) + .setMapping("@timestamp", "type=date"), + TimeValue.timeValueSeconds(60) + ); + + assertAcked( + remoteClient.admin() + .indices() + .prepareCreate("test-2-remote") + .setSettings( + Settings.builder() + .put("index.routing.allocation.require._name", remoteNode2) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .build() + ) + .setMapping("@timestamp", "type=date"), + TimeValue.timeValueSeconds(60) + ); + + for (int i = 0; i < 5; i++) { + int val = i * 2; + remoteClient.prepareIndex("test-1-remote") + .setId(Integer.toString(i)) + .setSource("@timestamp", 100000 + val, "event.category", "process", "key", "same", "value", val) + .get(); + } + for (int i = 0; i < 5; i++) { + int val = i * 2 + 1; + remoteClient.prepareIndex("test-2-remote") + .setId(Integer.toString(i)) + .setSource("@timestamp", 100000 + val, "event.category", "process", "key", "same", "value", val) + .get(); + } + + remoteClient.admin().indices().prepareRefresh().get(); + return remoteNode; + } + + // ------------------------------------------------------------------------ + // queries with full cluster (no missing shards) + // ------------------------------------------------------------------------ + + public void testNoFailures() throws ExecutionException, InterruptedException, IOException { + createSchema(); + + // event query + var request = new EqlSearchRequest().indices(REMOTE_CLUSTER + ":test-*") + .query("process where true") + .allowPartialSearchResults(randomBoolean()) + .allowPartialSequenceResults(randomBoolean()); + EqlSearchResponse
response = localClient().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().events().size(), equalTo(10)); + for (int i = 0; i < 10; i++) { + assertThat(response.hits().events().get(i).toString(), containsString("\"value\" : " + i)); + } + assertThat(response.shardFailures().length, is(0)); + + // sequence query on both shards + request = new EqlSearchRequest().indices(REMOTE_CLUSTER + ":test-*") + .query("sequence [process where value == 1] [process where value == 2]") + .allowPartialSearchResults(randomBoolean()) + .allowPartialSequenceResults(randomBoolean()); + response = localClient().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(1)); + EqlSearchResponse.Sequence sequence = response.hits().sequences().get(0); + assertThat(sequence.events().get(0).toString(), containsString("\"value\" : 1")); + assertThat(sequence.events().get(1).toString(), containsString("\"value\" : 2")); + assertThat(response.shardFailures().length, is(0)); + + // sequence query on the available shard only + request = new EqlSearchRequest().indices(REMOTE_CLUSTER + ":test-*") + .query("sequence [process where value == 1] [process where value == 3]") + .allowPartialSearchResults(randomBoolean()) + .allowPartialSequenceResults(randomBoolean()); + response = localClient().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(1)); + sequence = response.hits().sequences().get(0); + assertThat(sequence.events().get(0).toString(), containsString("\"value\" : 1")); + assertThat(sequence.events().get(1).toString(), containsString("\"value\" : 3")); + assertThat(response.shardFailures().length, is(0)); + + // sequence query on the unavailable shard only + request = new EqlSearchRequest().indices(REMOTE_CLUSTER + ":test-*") + .query("sequence [process where value == 0] [process where value == 2]") + .allowPartialSearchResults(randomBoolean()) + .allowPartialSequenceResults(randomBoolean()); + response = localClient().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(1)); + sequence = response.hits().sequences().get(0); + assertThat(sequence.events().get(0).toString(), containsString("\"value\" : 0")); + assertThat(sequence.events().get(1).toString(), containsString("\"value\" : 2")); + assertThat(response.shardFailures().length, is(0)); + + // sequence query with missing event on unavailable shard + request = new EqlSearchRequest().indices(REMOTE_CLUSTER + ":test-*") + .query("sequence with maxspan=10s [process where value == 1] ![process where value == 2] [process where value == 3]") + .allowPartialSearchResults(randomBoolean()) + .allowPartialSequenceResults(randomBoolean()); + response = localClient().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(0)); + assertThat(response.shardFailures().length, is(0)); + + // sample query on both shards + request = new EqlSearchRequest().indices(REMOTE_CLUSTER + ":test-*") + .query("sample by key [process where value == 2] [process where value == 1]") + .allowPartialSearchResults(randomBoolean()) + .allowPartialSequenceResults(randomBoolean()); + response = localClient().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(1)); + EqlSearchResponse.Sequence sample = response.hits().sequences().get(0); + assertThat(sample.events().get(0).toString(), containsString("\"value\" : 
2")); + assertThat(sample.events().get(1).toString(), containsString("\"value\" : 1")); + assertThat(response.shardFailures().length, is(0)); + + // sample query on the available shard only + request = new EqlSearchRequest().indices(REMOTE_CLUSTER + ":test-*") + .query("sample by key [process where value == 3] [process where value == 1]") + .allowPartialSearchResults(randomBoolean()) + .allowPartialSequenceResults(randomBoolean()); + response = localClient().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(1)); + sample = response.hits().sequences().get(0); + assertThat(sample.events().get(0).toString(), containsString("\"value\" : 3")); + assertThat(sample.events().get(1).toString(), containsString("\"value\" : 1")); + assertThat(response.shardFailures().length, is(0)); + + // sample query on the unavailable shard only + request = new EqlSearchRequest().indices(REMOTE_CLUSTER + ":test-*") + .query("sample by key [process where value == 2] [process where value == 0]") + .allowPartialSearchResults(randomBoolean()) + .allowPartialSequenceResults(randomBoolean()); + response = localClient().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(1)); + sample = response.hits().sequences().get(0); + assertThat(sample.events().get(0).toString(), containsString("\"value\" : 2")); + assertThat(sample.events().get(1).toString(), containsString("\"value\" : 0")); + assertThat(response.shardFailures().length, is(0)); + + } + + // ------------------------------------------------------------------------ + // same queries, with missing shards and allow_partial_search_results=true + // and allow_partial_sequence_result=true + // ------------------------------------------------------------------------ + + public void testAllowPartialSearchAndSequence_event() throws ExecutionException, InterruptedException, IOException { + var remoteNode = createSchema(); + // ------------------------------------------------------------------------ + // stop one of the nodes, make one of the shards unavailable + // ------------------------------------------------------------------------ + + cluster(REMOTE_CLUSTER).stopNode(remoteNode); + + // event query + var request = new EqlSearchRequest().indices(REMOTE_CLUSTER + ":test-*") + .query("process where true") + .allowPartialSearchResults(true); + var response = localClient().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().events().size(), equalTo(5)); + for (int i = 0; i < 5; i++) { + assertThat(response.hits().events().get(i).toString(), containsString("\"value\" : " + (i * 2 + 1))); + } + assertThat(response.shardFailures().length, is(1)); + } + + public void testAllowPartialSearchAndSequence_sequence() throws ExecutionException, InterruptedException, IOException { + var remoteNode = createSchema(); + // ------------------------------------------------------------------------ + // stop one of the nodes, make one of the shards unavailable + // ------------------------------------------------------------------------ + + cluster(REMOTE_CLUSTER).stopNode(remoteNode); + + // sequence query on both shards + var request = new EqlSearchRequest().indices(REMOTE_CLUSTER + ":test-*") + .query("sequence [process where value == 1] [process where value == 2]") + .allowPartialSearchResults(true) + .allowPartialSequenceResults(true); + var response = localClient().execute(EqlSearchAction.INSTANCE, request).get(); + 
assertThat(response.hits().sequences().size(), equalTo(0)); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1-remote")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + // sequence query on the available shard only + request = new EqlSearchRequest().indices(REMOTE_CLUSTER + ":test-*") + .query("sequence [process where value == 1] [process where value == 3]") + .allowPartialSearchResults(true) + .allowPartialSequenceResults(true); + response = localClient().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(1)); + var sequence = response.hits().sequences().get(0); + assertThat(sequence.events().get(0).toString(), containsString("\"value\" : 1")); + assertThat(sequence.events().get(1).toString(), containsString("\"value\" : 3")); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1-remote")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + // sequence query on the unavailable shard only + request = new EqlSearchRequest().indices(REMOTE_CLUSTER + ":test-*") + .query("sequence [process where value == 0] [process where value == 2]") + .allowPartialSearchResults(true) + .allowPartialSequenceResults(true); + response = localClient().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(0)); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1-remote")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + // sequence query with missing event on unavailable shard. 
THIS IS A FALSE POSITIVE + request = new EqlSearchRequest().indices(REMOTE_CLUSTER + ":test-*") + .query("sequence with maxspan=10s [process where value == 1] ![process where value == 2] [process where value == 3]") + .allowPartialSearchResults(true) + .allowPartialSequenceResults(true); + response = localClient().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(1)); + sequence = response.hits().sequences().get(0); + assertThat(sequence.events().get(0).toString(), containsString("\"value\" : 1")); + assertThat(sequence.events().get(2).toString(), containsString("\"value\" : 3")); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1-remote")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + } + + public void testAllowPartialSearchAndSequence_sample() throws ExecutionException, InterruptedException, IOException { + var remoteNode = createSchema(); + // ------------------------------------------------------------------------ + // stop one of the nodes, make one of the shards unavailable + // ------------------------------------------------------------------------ + + cluster(REMOTE_CLUSTER).stopNode(remoteNode); + + // sample query on both shards + var request = new EqlSearchRequest().indices(REMOTE_CLUSTER + ":test-*") + .query("sample by key [process where value == 2] [process where value == 1]") + .allowPartialSearchResults(true) + .allowPartialSequenceResults(true); + var response = localClient().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(0)); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1-remote")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + // sample query on the available shard only + request = new EqlSearchRequest().indices(REMOTE_CLUSTER + ":test-*") + .query("sample by key [process where value == 3] [process where value == 1]") + .allowPartialSearchResults(true) + .allowPartialSequenceResults(true); + response = localClient().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(1)); + var sample = response.hits().sequences().get(0); + assertThat(sample.events().get(0).toString(), containsString("\"value\" : 3")); + assertThat(sample.events().get(1).toString(), containsString("\"value\" : 1")); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1-remote")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + // sample query on the unavailable shard only + request = new EqlSearchRequest().indices(REMOTE_CLUSTER + ":test-*") + .query("sample by key [process where value == 2] [process where value == 0]") + .allowPartialSearchResults(true) + .allowPartialSequenceResults(true); + response = localClient().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(0)); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1-remote")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + } + + // ------------------------------------------------------------------------ + // same queries, with 
missing shards and allow_partial_search_results=true + // and default allow_partial_sequence_results (ie. false) + // ------------------------------------------------------------------------ + + public void testAllowPartialSearch_event() throws ExecutionException, InterruptedException, IOException { + var remoteNode = createSchema(); + // ------------------------------------------------------------------------ + // stop one of the nodes, make one of the shards unavailable + // ------------------------------------------------------------------------ + + cluster(REMOTE_CLUSTER).stopNode(remoteNode); + + // event query + var request = new EqlSearchRequest().indices(REMOTE_CLUSTER + ":test-*") + .query("process where true") + .allowPartialSearchResults(true); + var response = localClient().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().events().size(), equalTo(5)); + for (int i = 0; i < 5; i++) { + assertThat(response.hits().events().get(i).toString(), containsString("\"value\" : " + (i * 2 + 1))); + } + assertThat(response.shardFailures().length, is(1)); + + } + + public void testAllowPartialSearch_sequence() throws ExecutionException, InterruptedException, IOException { + var remoteNode = createSchema(); + // ------------------------------------------------------------------------ + // stop one of the nodes, make one of the shards unavailable + // ------------------------------------------------------------------------ + + cluster(REMOTE_CLUSTER).stopNode(remoteNode); + + // sequence query on both shards + var request = new EqlSearchRequest().indices(REMOTE_CLUSTER + ":test-*") + .query("sequence [process where value == 1] [process where value == 2]") + .allowPartialSearchResults(true); + var response = localClient().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(0)); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1-remote")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + // sequence query on the available shard only + request = new EqlSearchRequest().indices(REMOTE_CLUSTER + ":test-*") + .query("sequence [process where value == 1] [process where value == 3]") + .allowPartialSearchResults(true); + response = localClient().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(0)); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1-remote")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + // sequence query on the unavailable shard only + request = new EqlSearchRequest().indices(REMOTE_CLUSTER + ":test-*") + .query("sequence [process where value == 0] [process where value == 2]") + .allowPartialSearchResults(true); + response = localClient().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(0)); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1-remote")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + // sequence query with missing event on unavailable shard. 
THIS IS A FALSE POSITIVE + request = new EqlSearchRequest().indices(REMOTE_CLUSTER + ":test-*") + .query("sequence with maxspan=10s [process where value == 1] ![process where value == 2] [process where value == 3]") + .allowPartialSearchResults(true); + response = localClient().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(0)); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1-remote")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + } + + public void testAllowPartialSearch_sample() throws ExecutionException, InterruptedException, IOException { + var remoteNode = createSchema(); + // ------------------------------------------------------------------------ + // stop one of the nodes, make one of the shards unavailable + // ------------------------------------------------------------------------ + + cluster(REMOTE_CLUSTER).stopNode(remoteNode); + + // sample query on both shards + var request = new EqlSearchRequest().indices(REMOTE_CLUSTER + ":test-*") + .query("sample by key [process where value == 2] [process where value == 1]") + .allowPartialSearchResults(true); + var response = localClient().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(0)); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1-remote")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + // sample query on the available shard only + request = new EqlSearchRequest().indices(REMOTE_CLUSTER + ":test-*") + .query("sample by key [process where value == 3] [process where value == 1]") + .allowPartialSearchResults(true); + response = localClient().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(1)); + var sample = response.hits().sequences().get(0); + assertThat(sample.events().get(0).toString(), containsString("\"value\" : 3")); + assertThat(sample.events().get(1).toString(), containsString("\"value\" : 1")); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1-remote")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + // sample query on the unavailable shard only + request = new EqlSearchRequest().indices(REMOTE_CLUSTER + ":test-*") + .query("sample by key [process where value == 2] [process where value == 0]") + .allowPartialSearchResults(true); + response = localClient().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(0)); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1-remote")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + } + + // ------------------------------------------------------------------------ + // same queries, with missing shards and with default xpack.eql.default_allow_partial_results=true + // ------------------------------------------------------------------------ + + public void testClusterSetting_event() throws ExecutionException, InterruptedException, IOException { + var remoteNode = createSchema(); + // ------------------------------------------------------------------------ + // 
stop one of the nodes, make one of the shards unavailable + // ------------------------------------------------------------------------ + + cluster(REMOTE_CLUSTER).stopNode(remoteNode); + + cluster(REMOTE_CLUSTER).client() + .execute( + ClusterUpdateSettingsAction.INSTANCE, + new ClusterUpdateSettingsRequest(TimeValue.THIRTY_SECONDS, TimeValue.THIRTY_SECONDS).persistentSettings( + Settings.builder().put(EqlPlugin.DEFAULT_ALLOW_PARTIAL_SEARCH_RESULTS.getKey(), true) + ) + ) + .get(); + + // event query + var request = new EqlSearchRequest().indices(REMOTE_CLUSTER + ":test-*").query("process where true"); + var response = localClient().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().events().size(), equalTo(5)); + for (int i = 0; i < 5; i++) { + assertThat(response.hits().events().get(i).toString(), containsString("\"value\" : " + (i * 2 + 1))); + } + assertThat(response.shardFailures().length, is(1)); + + localClient().execute( + ClusterUpdateSettingsAction.INSTANCE, + new ClusterUpdateSettingsRequest(TimeValue.THIRTY_SECONDS, TimeValue.THIRTY_SECONDS).persistentSettings( + Settings.builder().putNull(EqlPlugin.DEFAULT_ALLOW_PARTIAL_SEARCH_RESULTS.getKey()) + ) + ).get(); + } + + public void testClusterSetting_sequence() throws ExecutionException, InterruptedException, IOException { + var remoteNode = createSchema(); + // ------------------------------------------------------------------------ + // stop one of the nodes, make one of the shards unavailable + // ------------------------------------------------------------------------ + + cluster(REMOTE_CLUSTER).stopNode(remoteNode); + + cluster(REMOTE_CLUSTER).client() + .execute( + ClusterUpdateSettingsAction.INSTANCE, + new ClusterUpdateSettingsRequest(TimeValue.THIRTY_SECONDS, TimeValue.THIRTY_SECONDS).persistentSettings( + Settings.builder().put(EqlPlugin.DEFAULT_ALLOW_PARTIAL_SEARCH_RESULTS.getKey(), true) + ) + ) + .get(); + // sequence query on both shards + var request = new EqlSearchRequest().indices(REMOTE_CLUSTER + ":test-*") + .query("sequence [process where value == 1] [process where value == 2]"); + var response = localClient().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(0)); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1-remote")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + // sequence query on the available shard only + request = new EqlSearchRequest().indices(REMOTE_CLUSTER + ":test-*") + .query("sequence [process where value == 1] [process where value == 3]"); + response = localClient().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(0)); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1-remote")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + // sequence query on the unavailable shard only + request = new EqlSearchRequest().indices(REMOTE_CLUSTER + ":test-*") + .query("sequence [process where value == 0] [process where value == 2]"); + response = localClient().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(0)); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1-remote")); + 
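+ // the request sets no partial-results flags: the cluster-wide xpack.eql.default_allow_partial_results=true default lets the search proceed, while the shard failure is still reported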
assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + // sequence query with missing event on unavailable shard. THIS IS A FALSE POSITIVE + request = new EqlSearchRequest().indices(REMOTE_CLUSTER + ":test-*") + .query("sequence with maxspan=10s [process where value == 1] ![process where value == 2] [process where value == 3]"); + response = localClient().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(0)); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1-remote")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + localClient().execute( + ClusterUpdateSettingsAction.INSTANCE, + new ClusterUpdateSettingsRequest(TimeValue.THIRTY_SECONDS, TimeValue.THIRTY_SECONDS).persistentSettings( + Settings.builder().putNull(EqlPlugin.DEFAULT_ALLOW_PARTIAL_SEARCH_RESULTS.getKey()) + ) + ).get(); + } + + public void testClusterSetting_sample() throws ExecutionException, InterruptedException, IOException { + var remoteNode = createSchema(); + // ------------------------------------------------------------------------ + // stop one of the nodes, make one of the shards unavailable + // ------------------------------------------------------------------------ + + cluster(REMOTE_CLUSTER).stopNode(remoteNode); + + cluster(REMOTE_CLUSTER).client() + .execute( + ClusterUpdateSettingsAction.INSTANCE, + new ClusterUpdateSettingsRequest(TimeValue.THIRTY_SECONDS, TimeValue.THIRTY_SECONDS).persistentSettings( + Settings.builder().put(EqlPlugin.DEFAULT_ALLOW_PARTIAL_SEARCH_RESULTS.getKey(), true) + ) + ) + .get(); + + // sample query on both shards + var request = new EqlSearchRequest().indices(REMOTE_CLUSTER + ":test-*") + .query("sample by key [process where value == 2] [process where value == 1]"); + var response = localClient().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(0)); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1-remote")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + // sample query on the available shard only + request = new EqlSearchRequest().indices(REMOTE_CLUSTER + ":test-*") + .query("sample by key [process where value == 3] [process where value == 1]"); + response = localClient().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(1)); + var sample = response.hits().sequences().get(0); + assertThat(sample.events().get(0).toString(), containsString("\"value\" : 3")); + assertThat(sample.events().get(1).toString(), containsString("\"value\" : 1")); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1-remote")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + // sample query on the unavailable shard only + request = new EqlSearchRequest().indices(REMOTE_CLUSTER + ":test-*") + .query("sample by key [process where value == 2] [process where value == 0]"); + response = localClient().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(0)); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1-remote")); 
+ assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + localClient().execute( + ClusterUpdateSettingsAction.INSTANCE, + new ClusterUpdateSettingsRequest(TimeValue.THIRTY_SECONDS, TimeValue.THIRTY_SECONDS).persistentSettings( + Settings.builder().putNull(EqlPlugin.DEFAULT_ALLOW_PARTIAL_SEARCH_RESULTS.getKey()) + ) + ).get(); + } +} diff --git a/x-pack/plugin/eql/src/internalClusterTest/java/org/elasticsearch/xpack/eql/action/PartialSearchResultsIT.java b/x-pack/plugin/eql/src/internalClusterTest/java/org/elasticsearch/xpack/eql/action/PartialSearchResultsIT.java new file mode 100644 index 0000000000000..9048d11f4eddf --- /dev/null +++ b/x-pack/plugin/eql/src/internalClusterTest/java/org/elasticsearch/xpack/eql/action/PartialSearchResultsIT.java @@ -0,0 +1,780 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.eql.action; + +import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsAction; +import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; +import org.elasticsearch.action.search.SearchPhaseExecutionException; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.CollectionUtils; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.search.SearchService; +import org.elasticsearch.test.transport.MockTransportService; +import org.elasticsearch.xpack.core.async.GetAsyncResultRequest; +import org.elasticsearch.xpack.eql.plugin.EqlAsyncGetResultAction; +import org.elasticsearch.xpack.eql.plugin.EqlPlugin; + +import java.util.Collection; +import java.util.List; +import java.util.concurrent.ExecutionException; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; + +public class PartialSearchResultsIT extends AbstractEqlIntegTestCase { + + @Override + protected Collection<Class<? extends Plugin>> nodePlugins() { + return CollectionUtils.appendToCopy(super.nodePlugins(), MockTransportService.TestPlugin.class); + } + + @Override + protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal, otherSettings)) + .put(SearchService.KEEPALIVE_INTERVAL_SETTING.getKey(), TimeValue.timeValueMillis(randomIntBetween(100, 500))) + .build(); + } + + /** + * + * @return node name where the first index is + */ + private String createSchema() { + internalCluster().ensureAtLeastNumDataNodes(2); + final List<String> dataNodes = internalCluster().clusterService() + .state() + .nodes() + .getDataNodes() + .values() + .stream() + .map(DiscoveryNode::getName) + .toList(); + final String assignedNodeForIndex1 = randomFrom(dataNodes); + + assertAcked( + indicesAdmin().prepareCreate("test-1") + .setSettings( + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put("index.routing.allocation.include._name", assignedNodeForIndex1) + .build() + ) + .setMapping("@timestamp", "type=date") + ); +
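+ // test-2 is excluded from the node that hosts test-1, so the two indices never share a node and exactly one of them can later be made unavailable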
assertAcked( + indicesAdmin().prepareCreate("test-2") + .setSettings( + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put("index.routing.allocation.exclude._name", assignedNodeForIndex1) + .build() + ) + .setMapping("@timestamp", "type=date") + ); + + for (int i = 0; i < 5; i++) { + int val = i * 2; + prepareIndex("test-1").setId(Integer.toString(i)) + .setSource("@timestamp", 100000 + val, "event.category", "process", "key", "same", "value", val) + .get(); + } + for (int i = 0; i < 5; i++) { + int val = i * 2 + 1; + prepareIndex("test-2").setId(Integer.toString(i)) + .setSource("@timestamp", 100000 + val, "event.category", "process", "key", "same", "value", val) + .get(); + } + refresh(); + return assignedNodeForIndex1; + } + + public void testNoFailures() throws Exception { + createSchema(); + + // ------------------------------------------------------------------------ + // queries with full cluster (no missing shards) + // ------------------------------------------------------------------------ + + // event query + var request = new EqlSearchRequest().indices("test-*") + .query("process where true") + .allowPartialSearchResults(randomBoolean()) + .allowPartialSequenceResults(randomBoolean()); + EqlSearchResponse response = client().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().events().size(), equalTo(10)); + for (int i = 0; i < 10; i++) { + assertThat(response.hits().events().get(i).toString(), containsString("\"value\" : " + i)); + } + assertThat(response.shardFailures().length, is(0)); + + // sequence query on both shards + request = new EqlSearchRequest().indices("test-*") + .query("sequence [process where value == 1] [process where value == 2]") + .allowPartialSearchResults(randomBoolean()) + .allowPartialSequenceResults(randomBoolean()); + response = client().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(1)); + EqlSearchResponse.Sequence sequence = response.hits().sequences().get(0); + assertThat(sequence.events().get(0).toString(), containsString("\"value\" : 1")); + assertThat(sequence.events().get(1).toString(), containsString("\"value\" : 2")); + assertThat(response.shardFailures().length, is(0)); + + // sequence query on the available shard only + request = new EqlSearchRequest().indices("test-*") + .query("sequence [process where value == 1] [process where value == 3]") + .allowPartialSearchResults(randomBoolean()) + .allowPartialSequenceResults(randomBoolean()); + response = client().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(1)); + sequence = response.hits().sequences().get(0); + assertThat(sequence.events().get(0).toString(), containsString("\"value\" : 1")); + assertThat(sequence.events().get(1).toString(), containsString("\"value\" : 3")); + assertThat(response.shardFailures().length, is(0)); + + // sequence query on the unavailable shard only + request = new EqlSearchRequest().indices("test-*") + .query("sequence [process where value == 0] [process where value == 2]") + .allowPartialSearchResults(randomBoolean()) + .allowPartialSequenceResults(randomBoolean()); + response = client().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(1)); + sequence = response.hits().sequences().get(0); + assertThat(sequence.events().get(0).toString(), containsString("\"value\" : 0")); + assertThat(sequence.events().get(1).toString(), 
containsString("\"value\" : 2")); + assertThat(response.shardFailures().length, is(0)); + + // sequence query with missing event on unavailable shard + request = new EqlSearchRequest().indices("test-*") + .query("sequence with maxspan=10s [process where value == 1] ![process where value == 2] [process where value == 3]") + .allowPartialSearchResults(randomBoolean()) + .allowPartialSequenceResults(randomBoolean()); + response = client().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(0)); + assertThat(response.shardFailures().length, is(0)); + + // sample query on both shards + request = new EqlSearchRequest().indices("test-*") + .query("sample by key [process where value == 2] [process where value == 1]") + .allowPartialSearchResults(randomBoolean()) + .allowPartialSequenceResults(randomBoolean()); + response = client().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(1)); + EqlSearchResponse.Sequence sample = response.hits().sequences().get(0); + assertThat(sample.events().get(0).toString(), containsString("\"value\" : 2")); + assertThat(sample.events().get(1).toString(), containsString("\"value\" : 1")); + assertThat(response.shardFailures().length, is(0)); + + // sample query on the available shard only + request = new EqlSearchRequest().indices("test-*") + .query("sample by key [process where value == 3] [process where value == 1]") + .allowPartialSearchResults(randomBoolean()) + .allowPartialSequenceResults(randomBoolean()); + response = client().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(1)); + sample = response.hits().sequences().get(0); + assertThat(sample.events().get(0).toString(), containsString("\"value\" : 3")); + assertThat(sample.events().get(1).toString(), containsString("\"value\" : 1")); + assertThat(response.shardFailures().length, is(0)); + + // sample query on the unavailable shard only + request = new EqlSearchRequest().indices("test-*") + .query("sample by key [process where value == 2] [process where value == 0]") + .allowPartialSearchResults(randomBoolean()) + .allowPartialSequenceResults(randomBoolean()); + response = client().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(1)); + sample = response.hits().sequences().get(0); + assertThat(sample.events().get(0).toString(), containsString("\"value\" : 2")); + assertThat(sample.events().get(1).toString(), containsString("\"value\" : 0")); + assertThat(response.shardFailures().length, is(0)); + + } + + // ------------------------------------------------------------------------ + // same queries, with missing shards. Let them fail + // allow_partial_sequence_results has no effect if allow_partial_sequence_results is not set to true. 
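+ // for example, with one shard down, a plain request such as
+ // new EqlSearchRequest().indices("test-*").query("process where true")
+ // is expected to be rejected outright (shouldFail() below asserts a SearchPhaseExecutionException),
+ // unless allowPartialSearchResults(true) is set on the request or via the cluster-level default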
+ // ------------------------------------------------------------------------ + + public void testFailures_event() throws Exception { + final String assignedNodeForIndex1 = createSchema(); + // ------------------------------------------------------------------------ + // stop one of the nodes, make one of the shards unavailable + // ------------------------------------------------------------------------ + + internalCluster().stopNode(assignedNodeForIndex1); + + // event query + shouldFail("process where true"); + + } + + public void testFailures_sequence() throws Exception { + final String assignedNodeForIndex1 = createSchema(); + // ------------------------------------------------------------------------ + // stop one of the nodes, make one of the shards unavailable + // ------------------------------------------------------------------------ + + internalCluster().stopNode(assignedNodeForIndex1); + + // sequence query on both shards + shouldFail("sequence [process where value == 1] [process where value == 2]"); + + // sequence query on the available shard only + shouldFail("sequence [process where value == 1] [process where value == 3]"); + + // sequence query on the unavailable shard only + shouldFail("sequence [process where value == 0] [process where value == 2]"); + + // sequence query with missing event on unavailable shard. + shouldFail("sequence with maxspan=10s [process where value == 1] ![process where value == 2] [process where value == 3]"); + } + + public void testFailures_sample() throws Exception { + final String assignedNodeForIndex1 = createSchema(); + // ------------------------------------------------------------------------ + // stop one of the nodes, make one of the shards unavailable + // ------------------------------------------------------------------------ + + internalCluster().stopNode(assignedNodeForIndex1); + + // sample query on both shards + shouldFail("sample by key [process where value == 2] [process where value == 1]"); + + // sample query on the available shard only + shouldFail("sample by key [process where value == 3] [process where value == 1]"); + + // sample query on the unavailable shard only + shouldFail("sample by key [process where value == 2] [process where value == 0]"); + + } + + // ------------------------------------------------------------------------ + // same queries, with missing shards and allow_partial_search_results=true + // and allow_partial_sequence_results=true + // ------------------------------------------------------------------------ + + public void testAllowPartialSearchAndSequenceResults_event() throws Exception { + final String assignedNodeForIndex1 = createSchema(); + // ------------------------------------------------------------------------ + // stop one of the nodes, make one of the shards unavailable + // ------------------------------------------------------------------------ + + internalCluster().stopNode(assignedNodeForIndex1); + + // event query + var request = new EqlSearchRequest().indices("test-*").query("process where true").allowPartialSearchResults(true); + var response = client().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().events().size(), equalTo(5)); + for (int i = 0; i < 5; i++) { + assertThat(response.hits().events().get(i).toString(), containsString("\"value\" : " + (i * 2 + 1))); + } + assertThat(response.shardFailures().length, is(1)); + + } + + public void testAllowPartialSearchAndSequenceResults_sequence() throws Exception { + final String assignedNodeForIndex1 =
createSchema(); + // ------------------------------------------------------------------------ + // stop one of the nodes, make one of the shards unavailable + // ------------------------------------------------------------------------ + + internalCluster().stopNode(assignedNodeForIndex1); + + // sequence query on both shards + var request = new EqlSearchRequest().indices("test-*") + .query("sequence [process where value == 1] [process where value == 2]") + .allowPartialSearchResults(true) + .allowPartialSequenceResults(true); + var response = client().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(0)); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + // sequence query on the available shard only + request = new EqlSearchRequest().indices("test-*") + .query("sequence [process where value == 1] [process where value == 3]") + .allowPartialSearchResults(true) + .allowPartialSequenceResults(true); + response = client().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(1)); + var sequence = response.hits().sequences().get(0); + assertThat(sequence.events().get(0).toString(), containsString("\"value\" : 1")); + assertThat(sequence.events().get(1).toString(), containsString("\"value\" : 3")); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + // sequence query on the unavailable shard only + request = new EqlSearchRequest().indices("test-*") + .query("sequence [process where value == 0] [process where value == 2]") + .allowPartialSearchResults(true) + .allowPartialSequenceResults(true); + response = client().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(0)); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + // sequence query with missing event on unavailable shard. 
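+ // the negated [process where value == 2] event lives only on the stopped shard (test-1 holds the even values),
+ // so the missing-event clause cannot be checked and the [value 1] -> [value 3] sequence from test-2 is emitted anyway.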
THIS IS A FALSE POSITIVE + request = new EqlSearchRequest().indices("test-*") + .query("sequence with maxspan=10s [process where value == 1] ![process where value == 2] [process where value == 3]") + .allowPartialSearchResults(true) + .allowPartialSequenceResults(true); + response = client().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(1)); + sequence = response.hits().sequences().get(0); + assertThat(sequence.events().get(0).toString(), containsString("\"value\" : 1")); + assertThat(sequence.events().get(2).toString(), containsString("\"value\" : 3")); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + } + + public void testAllowPartialSearchAndSequenceResults_sample() throws Exception { + final String assignedNodeForIndex1 = createSchema(); + // ------------------------------------------------------------------------ + // stop one of the nodes, make one of the shards unavailable + // ------------------------------------------------------------------------ + + internalCluster().stopNode(assignedNodeForIndex1); + + // sample query on both shards + var request = new EqlSearchRequest().indices("test-*") + .query("sample by key [process where value == 2] [process where value == 1]") + .allowPartialSearchResults(true) + .allowPartialSequenceResults(true); + var response = client().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(0)); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + // sample query on the available shard only + request = new EqlSearchRequest().indices("test-*") + .query("sample by key [process where value == 3] [process where value == 1]") + .allowPartialSearchResults(true) + .allowPartialSequenceResults(true); + response = client().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(1)); + var sample = response.hits().sequences().get(0); + assertThat(sample.events().get(0).toString(), containsString("\"value\" : 3")); + assertThat(sample.events().get(1).toString(), containsString("\"value\" : 1")); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + // sample query on the unavailable shard only + request = new EqlSearchRequest().indices("test-*") + .query("sample by key [process where value == 2] [process where value == 0]") + .allowPartialSearchResults(true) + .allowPartialSequenceResults(true); + response = client().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(0)); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + } + + // ------------------------------------------------------------------------ + // same queries, with missing shards and allow_partial_search_results=true + // and default allow_partial_sequence_results (ie. 
false) + // ------------------------------------------------------------------------ + + public void testAllowPartialSearchResults_event() throws Exception { + final String assignedNodeForIndex1 = createSchema(); + // ------------------------------------------------------------------------ + // stop one of the nodes, make one of the shards unavailable + // ------------------------------------------------------------------------ + + internalCluster().stopNode(assignedNodeForIndex1); + + // event query + var request = new EqlSearchRequest().indices("test-*").query("process where true").allowPartialSearchResults(true); + var response = client().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().events().size(), equalTo(5)); + for (int i = 0; i < 5; i++) { + assertThat(response.hits().events().get(i).toString(), containsString("\"value\" : " + (i * 2 + 1))); + } + assertThat(response.shardFailures().length, is(1)); + + } + + public void testAllowPartialSearchResults_sequence() throws Exception { + final String assignedNodeForIndex1 = createSchema(); + // ------------------------------------------------------------------------ + // stop one of the nodes, make one of the shards unavailable + // ------------------------------------------------------------------------ + + internalCluster().stopNode(assignedNodeForIndex1); + + // sequence query on both shards + var request = new EqlSearchRequest().indices("test-*") + .query("sequence [process where value == 1] [process where value == 2]") + .allowPartialSearchResults(true); + var response = client().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(0)); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + // sequence query on the available shard only + request = new EqlSearchRequest().indices("test-*") + .query("sequence [process where value == 1] [process where value == 3]") + .allowPartialSearchResults(true); + response = client().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(0)); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + // sequence query on the unavailable shard only + request = new EqlSearchRequest().indices("test-*") + .query("sequence [process where value == 0] [process where value == 2]") + .allowPartialSearchResults(true); + response = client().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(0)); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + // sequence query with missing event on unavailable shard. 
THIS IS A FALSE POSITIVE + request = new EqlSearchRequest().indices("test-*") + .query("sequence with maxspan=10s [process where value == 1] ![process where value == 2] [process where value == 3]") + .allowPartialSearchResults(true); + response = client().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(0)); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + } + + public void testAllowPartialSearchResults_sample() throws Exception { + final String assignedNodeForIndex1 = createSchema(); + // ------------------------------------------------------------------------ + // stop one of the nodes, make one of the shards unavailable + // ------------------------------------------------------------------------ + + internalCluster().stopNode(assignedNodeForIndex1); + + // sample query on both shards + var request = new EqlSearchRequest().indices("test-*") + .query("sample by key [process where value == 2] [process where value == 1]") + .allowPartialSearchResults(true); + var response = client().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(0)); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + // sample query on the available shard only + request = new EqlSearchRequest().indices("test-*") + .query("sample by key [process where value == 3] [process where value == 1]") + .allowPartialSearchResults(true); + response = client().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(1)); + var sample = response.hits().sequences().get(0); + assertThat(sample.events().get(0).toString(), containsString("\"value\" : 3")); + assertThat(sample.events().get(1).toString(), containsString("\"value\" : 1")); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + // sample query on the unavailable shard only + request = new EqlSearchRequest().indices("test-*") + .query("sample by key [process where value == 2] [process where value == 0]") + .allowPartialSearchResults(true); + response = client().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(0)); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + } + + // ------------------------------------------------------------------------ + // same queries, this time async, with missing shards and allow_partial_search_results=true + // and default allow_partial_sequence_results (ie. 
false) + // ------------------------------------------------------------------------ + + public void testAsyncAllowPartialSearchResults_event() throws Exception { + final String assignedNodeForIndex1 = createSchema(); + // ------------------------------------------------------------------------ + // stop one of the nodes, make one of the shards unavailable + // ------------------------------------------------------------------------ + + internalCluster().stopNode(assignedNodeForIndex1); + + // event query + var response = runAsync("process where true", true); + assertThat(response.hits().events().size(), equalTo(5)); + for (int i = 0; i < 5; i++) { + assertThat(response.hits().events().get(i).toString(), containsString("\"value\" : " + (i * 2 + 1))); + } + assertThat(response.shardFailures().length, is(1)); + + } + + public void testAsyncAllowPartialSearchResults_sequence() throws Exception { + final String assignedNodeForIndex1 = createSchema(); + // ------------------------------------------------------------------------ + // stop one of the nodes, make one of the shards unavailable + // ------------------------------------------------------------------------ + + internalCluster().stopNode(assignedNodeForIndex1); + + // sequence query on both shards + var response = runAsync("sequence [process where value == 1] [process where value == 2]", true); + assertThat(response.hits().sequences().size(), equalTo(0)); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + // sequence query on the available shard only + response = runAsync("sequence [process where value == 1] [process where value == 3]", true); + assertThat(response.hits().sequences().size(), equalTo(0)); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + // sequence query on the unavailable shard only + response = runAsync("sequence [process where value == 0] [process where value == 2]", true); + assertThat(response.hits().sequences().size(), equalTo(0)); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + // sequence query with missing event on unavailable shard. 
THIS IS A FALSE POSITIVE + response = runAsync( + "sequence with maxspan=10s [process where value == 1] ![process where value == 2] [process where value == 3]", + true + ); + assertThat(response.hits().sequences().size(), equalTo(0)); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + } + + public void testAsyncAllowPartialSearchResults_sample() throws Exception { + final String assignedNodeForIndex1 = createSchema(); + // ------------------------------------------------------------------------ + // stop one of the nodes, make one of the shards unavailable + // ------------------------------------------------------------------------ + + internalCluster().stopNode(assignedNodeForIndex1); + // sample query on both shards + var response = runAsync("sample by key [process where value == 2] [process where value == 1]", true); + assertThat(response.hits().sequences().size(), equalTo(0)); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + // sample query on the available shard only + response = runAsync("sample by key [process where value == 3] [process where value == 1]", true); + assertThat(response.hits().sequences().size(), equalTo(1)); + var sample = response.hits().sequences().get(0); + assertThat(sample.events().get(0).toString(), containsString("\"value\" : 3")); + assertThat(sample.events().get(1).toString(), containsString("\"value\" : 1")); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + // sample query on the unavailable shard only + response = runAsync("sample by key [process where value == 2] [process where value == 0]", true); + assertThat(response.hits().sequences().size(), equalTo(0)); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + } + + // ------------------------------------------------------------------------ + // same queries, with missing shards and with default xpack.eql.default_allow_partial_results=true + // ------------------------------------------------------------------------ + + public void testClusterSetting_event() throws Exception { + final String assignedNodeForIndex1 = createSchema(); + // ------------------------------------------------------------------------ + // stop one of the nodes, make one of the shards unavailable + // ------------------------------------------------------------------------ + + internalCluster().stopNode(assignedNodeForIndex1); + + client().execute( + ClusterUpdateSettingsAction.INSTANCE, + new ClusterUpdateSettingsRequest(TimeValue.THIRTY_SECONDS, TimeValue.THIRTY_SECONDS).persistentSettings( + Settings.builder().put(EqlPlugin.DEFAULT_ALLOW_PARTIAL_SEARCH_RESULTS.getKey(), true) + ) + ).get(); + + // event query + var request = new EqlSearchRequest().indices("test-*").query("process where true"); + var response = client().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().events().size(), equalTo(5)); + for (int i 
= 0; i < 5; i++) { + assertThat(response.hits().events().get(i).toString(), containsString("\"value\" : " + (i * 2 + 1))); + } + assertThat(response.shardFailures().length, is(1)); + + client().execute( + ClusterUpdateSettingsAction.INSTANCE, + new ClusterUpdateSettingsRequest(TimeValue.THIRTY_SECONDS, TimeValue.THIRTY_SECONDS).persistentSettings( + Settings.builder().putNull(EqlPlugin.DEFAULT_ALLOW_PARTIAL_SEARCH_RESULTS.getKey()) + ) + ).get(); + } + + public void testClusterSetting_sequence() throws Exception { + final String assignedNodeForIndex1 = createSchema(); + // ------------------------------------------------------------------------ + // stop one of the nodes, make one of the shards unavailable + // ------------------------------------------------------------------------ + + internalCluster().stopNode(assignedNodeForIndex1); + + client().execute( + ClusterUpdateSettingsAction.INSTANCE, + new ClusterUpdateSettingsRequest(TimeValue.THIRTY_SECONDS, TimeValue.THIRTY_SECONDS).persistentSettings( + Settings.builder().put(EqlPlugin.DEFAULT_ALLOW_PARTIAL_SEARCH_RESULTS.getKey(), true) + ) + ).get(); + // sequence query on both shards + var request = new EqlSearchRequest().indices("test-*").query("sequence [process where value == 1] [process where value == 2]"); + var response = client().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(0)); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + // sequence query on the available shard only + request = new EqlSearchRequest().indices("test-*").query("sequence [process where value == 1] [process where value == 3]"); + response = client().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(0)); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + // sequence query on the unavailable shard only + request = new EqlSearchRequest().indices("test-*").query("sequence [process where value == 0] [process where value == 2]"); + response = client().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(0)); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + // sequence query with missing event on unavailable shard. 
THIS IS A FALSE POSITIVE + request = new EqlSearchRequest().indices("test-*") + .query("sequence with maxspan=10s [process where value == 1] ![process where value == 2] [process where value == 3]"); + response = client().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(0)); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + client().execute( + ClusterUpdateSettingsAction.INSTANCE, + new ClusterUpdateSettingsRequest(TimeValue.THIRTY_SECONDS, TimeValue.THIRTY_SECONDS).persistentSettings( + Settings.builder().putNull(EqlPlugin.DEFAULT_ALLOW_PARTIAL_SEARCH_RESULTS.getKey()) + ) + ).get(); + } + + public void testClusterSetting_sample() throws Exception { + final String assignedNodeForIndex1 = createSchema(); + // ------------------------------------------------------------------------ + // stop one of the nodes, make one of the shards unavailable + // ------------------------------------------------------------------------ + + internalCluster().stopNode(assignedNodeForIndex1); + + client().execute( + ClusterUpdateSettingsAction.INSTANCE, + new ClusterUpdateSettingsRequest(TimeValue.THIRTY_SECONDS, TimeValue.THIRTY_SECONDS).persistentSettings( + Settings.builder().put(EqlPlugin.DEFAULT_ALLOW_PARTIAL_SEARCH_RESULTS.getKey(), true) + ) + ).get(); + + // sample query on both shards + var request = new EqlSearchRequest().indices("test-*").query("sample by key [process where value == 2] [process where value == 1]"); + var response = client().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(0)); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + // sample query on the available shard only + request = new EqlSearchRequest().indices("test-*").query("sample by key [process where value == 3] [process where value == 1]"); + response = client().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(1)); + var sample = response.hits().sequences().get(0); + assertThat(sample.events().get(0).toString(), containsString("\"value\" : 3")); + assertThat(sample.events().get(1).toString(), containsString("\"value\" : 1")); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + // sample query on the unavailable shard only + request = new EqlSearchRequest().indices("test-*").query("sample by key [process where value == 2] [process where value == 0]"); + response = client().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(0)); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + client().execute( + ClusterUpdateSettingsAction.INSTANCE, + new ClusterUpdateSettingsRequest(TimeValue.THIRTY_SECONDS, TimeValue.THIRTY_SECONDS).persistentSettings( + 
Settings.builder().putNull(EqlPlugin.DEFAULT_ALLOW_PARTIAL_SEARCH_RESULTS.getKey()) + ) + ).get(); + } + + private static EqlSearchResponse runAsync(String query, Boolean allowPartialSearchResults) throws InterruptedException, + ExecutionException { + EqlSearchRequest request; + EqlSearchResponse response; + request = new EqlSearchRequest().indices("test-*").query(query).waitForCompletionTimeout(TimeValue.ZERO); + if (allowPartialSearchResults != null) { + request = request.allowPartialSearchResults(allowPartialSearchResults); + } + response = client().execute(EqlSearchAction.INSTANCE, request).get(); + while (response.isRunning()) { + GetAsyncResultRequest getResultsRequest = new GetAsyncResultRequest(response.id()).setKeepAlive(TimeValue.timeValueMinutes(10)) + .setWaitForCompletionTimeout(TimeValue.timeValueMillis(10)); + response = client().execute(EqlAsyncGetResultAction.INSTANCE, getResultsRequest).get(); + } + return response; + } + + private static void shouldFail(String query) throws InterruptedException { + EqlSearchRequest request = new EqlSearchRequest().indices("test-*").query(query); + if (randomBoolean()) { + request = request.allowPartialSearchResults(false); + } + if (randomBoolean()) { + request = request.allowPartialSequenceResults(randomBoolean()); + } + try { + client().execute(EqlSearchAction.INSTANCE, request).get(); + fail(); + } catch (ExecutionException e) { + assertThat(e.getCause(), instanceOf(SearchPhaseExecutionException.class)); + } + } +} diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/action/EqlSearchRequest.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/action/EqlSearchRequest.java index 0aeddd525e317..5804e11b72ff5 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/action/EqlSearchRequest.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/action/EqlSearchRequest.java @@ -63,6 +63,8 @@ public class EqlSearchRequest extends ActionRequest implements IndicesRequest.Re private List fetchFields; private Map runtimeMappings = emptyMap(); private int maxSamplesPerKey = RequestDefaults.MAX_SAMPLES_PER_KEY; + private Boolean allowPartialSearchResults; + private Boolean allowPartialSequenceResults; // Async settings private TimeValue waitForCompletionTimeout = null; @@ -83,6 +85,8 @@ public class EqlSearchRequest extends ActionRequest implements IndicesRequest.Re static final String KEY_FETCH_FIELDS = "fields"; static final String KEY_RUNTIME_MAPPINGS = "runtime_mappings"; static final String KEY_MAX_SAMPLES_PER_KEY = "max_samples_per_key"; + static final String KEY_ALLOW_PARTIAL_SEARCH_RESULTS = "allow_partial_search_results"; + static final String KEY_ALLOW_PARTIAL_SEQUENCE_RESULTS = "allow_partial_sequence_results"; static final ParseField FILTER = new ParseField(KEY_FILTER); static final ParseField TIMESTAMP_FIELD = new ParseField(KEY_TIMESTAMP_FIELD); @@ -97,6 +101,8 @@ public class EqlSearchRequest extends ActionRequest implements IndicesRequest.Re static final ParseField RESULT_POSITION = new ParseField(KEY_RESULT_POSITION); static final ParseField FETCH_FIELDS_FIELD = SearchSourceBuilder.FETCH_FIELDS_FIELD; static final ParseField MAX_SAMPLES_PER_KEY = new ParseField(KEY_MAX_SAMPLES_PER_KEY); + static final ParseField ALLOW_PARTIAL_SEARCH_RESULTS = new ParseField(KEY_ALLOW_PARTIAL_SEARCH_RESULTS); + static final ParseField ALLOW_PARTIAL_SEQUENCE_RESULTS = new ParseField(KEY_ALLOW_PARTIAL_SEQUENCE_RESULTS); private static final ObjectParser PARSER = 
objectParser(EqlSearchRequest::new); @@ -135,6 +141,13 @@ public EqlSearchRequest(StreamInput in) throws IOException { if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_7_0)) { maxSamplesPerKey = in.readInt(); } + if (in.getTransportVersion().onOrAfter(TransportVersions.EQL_ALLOW_PARTIAL_SEARCH_RESULTS)) { + allowPartialSearchResults = in.readOptionalBoolean(); + allowPartialSequenceResults = in.readOptionalBoolean(); + } else { + allowPartialSearchResults = false; + allowPartialSequenceResults = false; + } } @Override @@ -245,6 +258,8 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(KEY_RUNTIME_MAPPINGS, runtimeMappings); } builder.field(KEY_MAX_SAMPLES_PER_KEY, maxSamplesPerKey); + builder.field(KEY_ALLOW_PARTIAL_SEARCH_RESULTS, allowPartialSearchResults); + builder.field(KEY_ALLOW_PARTIAL_SEQUENCE_RESULTS, allowPartialSequenceResults); return builder; } @@ -279,6 +294,8 @@ protected static ObjectParser objectParser parser.declareField(EqlSearchRequest::fetchFields, EqlSearchRequest::parseFetchFields, FETCH_FIELDS_FIELD, ValueType.VALUE_ARRAY); parser.declareObject(EqlSearchRequest::runtimeMappings, (p, c) -> p.map(), SearchSourceBuilder.RUNTIME_MAPPINGS_FIELD); parser.declareInt(EqlSearchRequest::maxSamplesPerKey, MAX_SAMPLES_PER_KEY); + parser.declareBoolean(EqlSearchRequest::allowPartialSearchResults, ALLOW_PARTIAL_SEARCH_RESULTS); + parser.declareBoolean(EqlSearchRequest::allowPartialSequenceResults, ALLOW_PARTIAL_SEQUENCE_RESULTS); return parser; } @@ -427,6 +444,24 @@ public EqlSearchRequest maxSamplesPerKey(int maxSamplesPerKey) { return this; } + public Boolean allowPartialSearchResults() { + return allowPartialSearchResults; + } + + public EqlSearchRequest allowPartialSearchResults(Boolean val) { + this.allowPartialSearchResults = val; + return this; + } + + public Boolean allowPartialSequenceResults() { + return allowPartialSequenceResults; + } + + public EqlSearchRequest allowPartialSequenceResults(Boolean val) { + this.allowPartialSequenceResults = val; + return this; + } + private static List parseFetchFields(XContentParser parser) throws IOException { List result = new ArrayList<>(); Token token = parser.currentToken(); @@ -470,6 +505,10 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_7_0)) { out.writeInt(maxSamplesPerKey); } + if (out.getTransportVersion().onOrAfter(TransportVersions.EQL_ALLOW_PARTIAL_SEARCH_RESULTS)) { + out.writeOptionalBoolean(allowPartialSearchResults); + out.writeOptionalBoolean(allowPartialSequenceResults); + } } @Override @@ -496,7 +535,9 @@ public boolean equals(Object o) { && Objects.equals(resultPosition, that.resultPosition) && Objects.equals(fetchFields, that.fetchFields) && Objects.equals(runtimeMappings, that.runtimeMappings) - && Objects.equals(maxSamplesPerKey, that.maxSamplesPerKey); + && Objects.equals(maxSamplesPerKey, that.maxSamplesPerKey) + && Objects.equals(allowPartialSearchResults, that.allowPartialSearchResults) + && Objects.equals(allowPartialSequenceResults, that.allowPartialSequenceResults); } @Override @@ -517,7 +558,9 @@ public int hashCode() { resultPosition, fetchFields, runtimeMappings, - maxSamplesPerKey + maxSamplesPerKey, + allowPartialSearchResults, + allowPartialSequenceResults ); } diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/action/EqlSearchResponse.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/action/EqlSearchResponse.java index 
2b7b8b074fa71..a4d93b7659970 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/action/EqlSearchResponse.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/action/EqlSearchResponse.java @@ -7,8 +7,11 @@ package org.elasticsearch.xpack.eql.action; import org.apache.lucene.search.TotalHits; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ShardOperationFailedException; +import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; @@ -17,6 +20,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.lucene.Lucene; +import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParserUtils; import org.elasticsearch.core.Nullable; @@ -36,6 +40,7 @@ import java.io.IOException; import java.nio.charset.StandardCharsets; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.List; @@ -54,6 +59,7 @@ public class EqlSearchResponse extends ActionResponse implements ToXContentObjec private final String asyncExecutionId; private final boolean isRunning; private final boolean isPartial; + private final ShardSearchFailure[] shardFailures; private static final class Fields { static final String TOOK = "took"; @@ -62,6 +68,7 @@ private static final class Fields { static final String ID = "id"; static final String IS_RUNNING = "is_running"; static final String IS_PARTIAL = "is_partial"; + static final String SHARD_FAILURES = "shard_failures"; } private static final ParseField TOOK = new ParseField(Fields.TOOK); @@ -70,8 +77,10 @@ private static final class Fields { private static final ParseField ID = new ParseField(Fields.ID); private static final ParseField IS_RUNNING = new ParseField(Fields.IS_RUNNING); private static final ParseField IS_PARTIAL = new ParseField(Fields.IS_PARTIAL); + private static final ParseField SHARD_FAILURES = new ParseField(Fields.SHARD_FAILURES); private static final InstantiatingObjectParser PARSER; + static { InstantiatingObjectParser.Builder parser = InstantiatingObjectParser.builder( "eql/search_response", @@ -84,11 +93,12 @@ private static final class Fields { parser.declareString(optionalConstructorArg(), ID); parser.declareBoolean(constructorArg(), IS_RUNNING); parser.declareBoolean(constructorArg(), IS_PARTIAL); + parser.declareObjectArray(optionalConstructorArg(), (p, c) -> ShardSearchFailure.EMPTY_ARRAY, SHARD_FAILURES); PARSER = parser.build(); } - public EqlSearchResponse(Hits hits, long tookInMillis, boolean isTimeout) { - this(hits, tookInMillis, isTimeout, null, false, false); + public EqlSearchResponse(Hits hits, long tookInMillis, boolean isTimeout, ShardSearchFailure[] shardFailures) { + this(hits, tookInMillis, isTimeout, null, false, false, shardFailures); } public EqlSearchResponse( @@ -97,7 +107,8 @@ public EqlSearchResponse( boolean isTimeout, String asyncExecutionId, boolean isRunning, - boolean isPartial + boolean isPartial, + ShardSearchFailure[] shardFailures ) { super(); this.hits = hits == null ? 
Hits.EMPTY : hits; @@ -106,6 +117,7 @@ public EqlSearchResponse( this.asyncExecutionId = asyncExecutionId; this.isRunning = isRunning; this.isPartial = isPartial; + this.shardFailures = shardFailures; } public EqlSearchResponse(StreamInput in) throws IOException { @@ -116,6 +128,11 @@ public EqlSearchResponse(StreamInput in) throws IOException { asyncExecutionId = in.readOptionalString(); isPartial = in.readBoolean(); isRunning = in.readBoolean(); + if (in.getTransportVersion().onOrAfter(TransportVersions.EQL_ALLOW_PARTIAL_SEARCH_RESULTS)) { + shardFailures = in.readArray(ShardSearchFailure::readShardSearchFailure, ShardSearchFailure[]::new); + } else { + shardFailures = ShardSearchFailure.EMPTY_ARRAY; + } } public static EqlSearchResponse fromXContent(XContentParser parser) { @@ -130,6 +147,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(asyncExecutionId); out.writeBoolean(isPartial); out.writeBoolean(isRunning); + if (out.getTransportVersion().onOrAfter(TransportVersions.EQL_ALLOW_PARTIAL_SEARCH_RESULTS)) { + out.writeArray(shardFailures); + } } @Override @@ -147,6 +167,13 @@ private XContentBuilder innerToXContent(XContentBuilder builder, Params params) builder.field(IS_RUNNING.getPreferredName(), isRunning); builder.field(TOOK.getPreferredName(), tookInMillis); builder.field(TIMED_OUT.getPreferredName(), isTimeout); + if (CollectionUtils.isEmpty(shardFailures) == false) { + builder.startArray(SHARD_FAILURES.getPreferredName()); + for (ShardOperationFailedException shardFailure : ExceptionsHelper.groupBy(shardFailures)) { + shardFailure.toXContent(builder, params); + } + builder.endArray(); + } hits.toXContent(builder, params); return builder; } @@ -178,6 +205,10 @@ public boolean isPartial() { return isPartial; } + public ShardSearchFailure[] shardFailures() { + return shardFailures; + } + @Override public boolean equals(Object o) { if (this == o) { @@ -190,12 +221,13 @@ public boolean equals(Object o) { return Objects.equals(hits, that.hits) && Objects.equals(tookInMillis, that.tookInMillis) && Objects.equals(isTimeout, that.isTimeout) - && Objects.equals(asyncExecutionId, that.asyncExecutionId); + && Objects.equals(asyncExecutionId, that.asyncExecutionId) + && Arrays.equals(shardFailures, that.shardFailures); } @Override public int hashCode() { - return Objects.hash(hits, tookInMillis, isTimeout, asyncExecutionId); + return Objects.hash(hits, tookInMillis, isTimeout, asyncExecutionId, Arrays.hashCode(shardFailures)); } @Override diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/action/EqlSearchTask.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/action/EqlSearchTask.java index 2a1bc3b7adb67..0fc8e8c88d7d9 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/action/EqlSearchTask.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/action/EqlSearchTask.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.eql.action; +import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.core.TimeValue; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.xpack.core.async.AsyncExecutionId; @@ -39,7 +40,8 @@ public EqlSearchResponse getCurrentResult() { false, getExecutionId().getEncoded(), true, - true + true, + ShardSearchFailure.EMPTY_ARRAY ); } } diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/assembler/ExecutionManager.java 
b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/assembler/ExecutionManager.java index b26c815c1a2b5..672d6b87a8dbb 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/assembler/ExecutionManager.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/assembler/ExecutionManager.java @@ -167,7 +167,9 @@ public Executable assemble( criteria.subList(0, completionStage), criteria.get(completionStage), matcher, - listOfKeys + listOfKeys, + cfg.allowPartialSearchResults(), + cfg.allowPartialSequenceResults() ); return w; @@ -235,7 +237,8 @@ public Executable assemble(List> listOfKeys, List cfg.fetchSize(), limit, session.circuitBreaker(), - cfg.maxSamplesPerKey() + cfg.maxSamplesPerKey(), + cfg.allowPartialSearchResults() ); } diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/payload/AbstractPayload.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/payload/AbstractPayload.java index 823cd04d25f45..9fecf958b9714 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/payload/AbstractPayload.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/payload/AbstractPayload.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.eql.execution.payload; +import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.core.TimeValue; import org.elasticsearch.xpack.eql.session.Payload; @@ -14,10 +15,12 @@ public abstract class AbstractPayload implements Payload { private final boolean timedOut; private final TimeValue timeTook; + private ShardSearchFailure[] shardFailures; - protected AbstractPayload(boolean timedOut, TimeValue timeTook) { + protected AbstractPayload(boolean timedOut, TimeValue timeTook, ShardSearchFailure[] shardFailures) { this.timedOut = timedOut; this.timeTook = timeTook; + this.shardFailures = shardFailures; } @Override @@ -29,4 +32,9 @@ public boolean timedOut() { public TimeValue timeTook() { return timeTook; } + + @Override + public ShardSearchFailure[] shardFailures() { + return shardFailures; + } } diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/payload/EventPayload.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/payload/EventPayload.java index a7845ca62dccc..6471bc0814f70 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/payload/EventPayload.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/payload/EventPayload.java @@ -20,7 +20,7 @@ public class EventPayload extends AbstractPayload { private final List values; public EventPayload(SearchResponse response) { - super(response.isTimedOut(), response.getTook()); + super(response.isTimedOut(), response.getTook(), response.getShardFailures()); SearchHits hits = response.getHits(); values = new ArrayList<>(hits.getHits().length); diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sample/SampleIterator.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sample/SampleIterator.java index 89f1c4d1eb041..b9b7cfd6b615a 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sample/SampleIterator.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sample/SampleIterator.java @@ -14,6 +14,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.search.MultiSearchResponse; 
import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.core.TimeValue; import org.elasticsearch.search.SearchHit; @@ -35,6 +36,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; +import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; @@ -44,6 +46,7 @@ import static org.elasticsearch.common.Strings.EMPTY_ARRAY; import static org.elasticsearch.xpack.eql.execution.assembler.SampleQueryRequest.COMPOSITE_AGG_NAME; import static org.elasticsearch.xpack.eql.execution.search.RuntimeUtils.prepareRequest; +import static org.elasticsearch.xpack.eql.util.SearchHitUtils.addShardFailures; public class SampleIterator implements Executable { @@ -58,6 +61,7 @@ public class SampleIterator implements Executable { private final Limit limit; private final int maxSamplesPerKey; private long startTime; + private Map shardFailures = new HashMap<>(); // ---------- CIRCUIT BREAKER ----------- @@ -84,13 +88,16 @@ public class SampleIterator implements Executable { */ private long previousTotalPageSize = 0; + private boolean allowPartialSearchResults; + public SampleIterator( QueryClient client, List criteria, int fetchSize, Limit limit, CircuitBreaker circuitBreaker, - int maxSamplesPerKey + int maxSamplesPerKey, + boolean allowPartialSearchResults ) { this.client = client; this.criteria = criteria; @@ -100,6 +107,7 @@ public SampleIterator( this.limit = limit; this.circuitBreaker = circuitBreaker; this.maxSamplesPerKey = maxSamplesPerKey; + this.allowPartialSearchResults = allowPartialSearchResults; } @Override @@ -147,6 +155,7 @@ private void advance(ActionListener listener) { private void queryForCompositeAggPage(ActionListener listener, final SampleQueryRequest request) { client.query(request, listener.delegateFailureAndWrap((delegate, r) -> { + addShardFailures(shardFailures, r); // either the fields values or the fields themselves are missing // or the filter applied on the eql query matches no documents if (r.hasAggregations() == false) { @@ -209,13 +218,16 @@ private void finalStep(ActionListener listener) { for (SampleCriterion criterion : criteria) { SampleQueryRequest r = criterion.finalQuery(); r.singleKeyPair(compositeKeyValues, maxCriteria, maxSamplesPerKey); - searches.add(prepareRequest(r.searchSource(), false, EMPTY_ARRAY)); + searches.add(prepareRequest(r.searchSource(), false, allowPartialSearchResults, EMPTY_ARRAY)); } sampleKeys.add(new SequenceKey(compositeKeyValues.toArray())); } int initialSize = samples.size(); client.multiQuery(searches, listener.delegateFailureAndWrap((delegate, r) -> { + for (MultiSearchResponse.Item item : r) { + addShardFailures(shardFailures, item.getResponse()); + } List> sample = new ArrayList<>(maxCriteria); MultiSearchResponse.Item[] response = r.getResponses(); int docGroupsCounter = 1; @@ -280,14 +292,23 @@ private void payload(ActionListener listener) { log.trace("Sending payload for [{}] samples", samples.size()); if (samples.isEmpty()) { - listener.onResponse(new EmptyPayload(Type.SAMPLE, timeTook())); + listener.onResponse(new EmptyPayload(Type.SAMPLE, timeTook(), shardFailures.values().toArray(new ShardSearchFailure[0]))); return; } // get results through search (to keep using PIT) client.fetchHits( hits(samples), - ActionListeners.map(listener, listOfHits -> new SamplePayload(samples, listOfHits, false, timeTook())) + ActionListeners.map( + 
listener, + listOfHits -> new SamplePayload( + samples, + listOfHits, + false, + timeTook(), + shardFailures.values().toArray(new ShardSearchFailure[0]) + ) + ) ); } diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sample/SamplePayload.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sample/SamplePayload.java index 121f4c208273b..aee084dd88734 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sample/SamplePayload.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sample/SamplePayload.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.eql.execution.sample; +import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.core.TimeValue; import org.elasticsearch.search.SearchHit; import org.elasticsearch.xpack.eql.action.EqlSearchResponse.Event; @@ -19,8 +20,14 @@ class SamplePayload extends AbstractPayload { private final List values; - SamplePayload(List samples, List> docs, boolean timedOut, TimeValue timeTook) { - super(timedOut, timeTook); + SamplePayload( + List samples, + List> docs, + boolean timedOut, + TimeValue timeTook, + ShardSearchFailure[] shardFailures + ) { + super(timedOut, timeTook, shardFailures); values = new ArrayList<>(samples.size()); for (int i = 0; i < samples.size(); i++) { diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/search/BasicQueryClient.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/search/BasicQueryClient.java index 6cbe5298b5950..18623c17dcffb 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/search/BasicQueryClient.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/search/BasicQueryClient.java @@ -46,12 +46,14 @@ public class BasicQueryClient implements QueryClient { final Client client; final String[] indices; final List fetchFields; + private final boolean allowPartialSearchResults; public BasicQueryClient(EqlSession eqlSession) { this.cfg = eqlSession.configuration(); this.client = eqlSession.client(); this.indices = cfg.indices(); this.fetchFields = cfg.fetchFields(); + this.allowPartialSearchResults = cfg.allowPartialSearchResults(); } @Override @@ -60,11 +62,11 @@ public void query(QueryRequest request, ActionListener listener) // set query timeout searchSource.timeout(cfg.requestTimeout()); - SearchRequest search = prepareRequest(searchSource, false, indices); - search(search, searchLogListener(listener, log)); + SearchRequest search = prepareRequest(searchSource, false, allowPartialSearchResults, indices); + search(search, allowPartialSearchResults, searchLogListener(listener, log, allowPartialSearchResults)); } - protected void search(SearchRequest search, ActionListener listener) { + protected void search(SearchRequest search, boolean allowPartialSearchResults, ActionListener listener) { if (cfg.isCancelled()) { listener.onFailure(new TaskCancelledException("cancelled")); return; @@ -77,7 +79,7 @@ protected void search(SearchRequest search, ActionListener liste client.search(search, listener); } - protected void search(MultiSearchRequest search, ActionListener listener) { + protected void search(MultiSearchRequest search, boolean allowPartialSearchResults, ActionListener listener) { if (cfg.isCancelled()) { listener.onFailure(new TaskCancelledException("cancelled")); return; @@ -91,7 +93,7 @@ protected void search(MultiSearchRequest search, ActionListener> refs, 
ActionListener { + search(multiSearchBuilder.request(), allowPartialSearchResults, listener.delegateFailureAndWrap((delegate, r) -> { for (MultiSearchResponse.Item item : r.getResponses()) { // check for failures if (item.isFailure()) { @@ -187,6 +189,6 @@ public void multiQuery(List searches, ActionListener listener) { + protected void search(SearchRequest search, boolean allowPartialSearchResults, ActionListener listener) { // no pitId, ask for one if (pitId == null) { - openPIT(listener, () -> searchWithPIT(search, listener)); + openPIT(listener, () -> searchWithPIT(search, listener, allowPartialSearchResults), allowPartialSearchResults); } else { - searchWithPIT(search, listener); + searchWithPIT(search, listener, allowPartialSearchResults); } } - private void searchWithPIT(SearchRequest request, ActionListener listener) { + private void searchWithPIT(SearchRequest request, ActionListener listener, boolean allowPartialSearchResults) { makeRequestPITCompatible(request); // get the pid on each response - super.search(request, pitListener(SearchResponse::pointInTimeId, listener)); + super.search(request, allowPartialSearchResults, pitListener(SearchResponse::pointInTimeId, listener)); } @Override - protected void search(MultiSearchRequest search, ActionListener listener) { + protected void search(MultiSearchRequest search, boolean allowPartialSearchResults, ActionListener listener) { // no pitId, ask for one if (pitId == null) { - openPIT(listener, () -> searchWithPIT(search, listener)); + openPIT(listener, () -> searchWithPIT(search, allowPartialSearchResults, listener), allowPartialSearchResults); } else { - searchWithPIT(search, listener); + searchWithPIT(search, allowPartialSearchResults, listener); } } - private void searchWithPIT(MultiSearchRequest search, ActionListener listener) { + private void searchWithPIT(MultiSearchRequest search, boolean allowPartialSearchResults, ActionListener listener) { for (SearchRequest request : search.requests()) { makeRequestPITCompatible(request); } // get the pid on each request - super.search(search, pitListener(r -> { + super.search(search, allowPartialSearchResults, pitListener(r -> { // get pid for (MultiSearchResponse.Item item : r.getResponses()) { // pick the first non-failing response @@ -135,9 +135,10 @@ private ActionListener pitListener( ); } - private void openPIT(ActionListener listener, Runnable runnable) { + private void openPIT(ActionListener listener, Runnable runnable, boolean allowPartialSearchResults) { OpenPointInTimeRequest request = new OpenPointInTimeRequest(indices).indicesOptions(IndexResolver.FIELD_CAPS_INDICES_OPTIONS) - .keepAlive(keepAlive); + .keepAlive(keepAlive) + .allowPartialSearchResults(allowPartialSearchResults); request.indexFilter(filter); client.execute(TransportOpenPointInTimeAction.TYPE, request, listener.delegateFailureAndWrap((l, r) -> { pitId = r.getPointInTimeId(); diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/search/RuntimeUtils.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/search/RuntimeUtils.java index 40f7f7139efa1..92af8c562f840 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/search/RuntimeUtils.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/search/RuntimeUtils.java @@ -56,10 +56,14 @@ public final class RuntimeUtils { private RuntimeUtils() {} - public static ActionListener searchLogListener(ActionListener listener, Logger log) { + public static ActionListener 
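The point-in-time handshake gains the same toggle: when partial results are disallowed, an unavailable shard should fail the PIT open outright instead of yielding a PIT over an incomplete shard set. A minimal sketch of the open call, assuming `client`, `openListener` and the flag are in scope and using placeholder indices and keep-alive:

import org.elasticsearch.action.search.OpenPointInTimeRequest;
import org.elasticsearch.action.search.TransportOpenPointInTimeAction;
import org.elasticsearch.core.TimeValue;

// Sketch only: "logs-*" and the 5m keep-alive are illustrative placeholders.
OpenPointInTimeRequest pit = new OpenPointInTimeRequest("logs-*")
    .keepAlive(TimeValue.timeValueMinutes(5))
    .allowPartialSearchResults(allowPartialSearchResults); // the new toggle
client.execute(TransportOpenPointInTimeAction.TYPE, pit, openListener);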
searchLogListener( + ActionListener listener, + Logger log, + boolean allowPartialResults + ) { return listener.delegateFailureAndWrap((delegate, response) -> { ShardSearchFailure[] failures = response.getShardFailures(); - if (CollectionUtils.isEmpty(failures) == false) { + if (CollectionUtils.isEmpty(failures) == false && allowPartialResults == false) { delegate.onFailure(new EqlIllegalArgumentException(failures[0].reason(), failures[0].getCause())); return; } @@ -70,16 +74,22 @@ public static ActionListener searchLogListener(ActionListener multiSearchLogListener(ActionListener listener, Logger log) { + public static ActionListener multiSearchLogListener( + ActionListener listener, + boolean allowPartialSearchResults, + Logger log + ) { return listener.delegateFailureAndWrap((delegate, items) -> { for (MultiSearchResponse.Item item : items) { Exception failure = item.getFailure(); SearchResponse response = item.getResponse(); if (failure == null) { - ShardSearchFailure[] failures = response.getShardFailures(); - if (CollectionUtils.isEmpty(failures) == false) { - failure = new EqlIllegalArgumentException(failures[0].reason(), failures[0].getCause()); + if (allowPartialSearchResults == false) { + ShardSearchFailure[] failures = response.getShardFailures(); + if (CollectionUtils.isEmpty(failures) == false) { + failure = new EqlIllegalArgumentException(failures[0].reason(), failures[0].getCause()); + } } } if (failure != null) { @@ -170,11 +180,16 @@ public static HitExtractor createExtractor(FieldExtraction ref, EqlConfiguration throw new EqlIllegalArgumentException("Unexpected value reference {}", ref.getClass()); } - public static SearchRequest prepareRequest(SearchSourceBuilder source, boolean includeFrozen, String... indices) { + public static SearchRequest prepareRequest( + SearchSourceBuilder source, + boolean includeFrozen, + boolean allowPartialSearchResults, + String... indices + ) { SearchRequest searchRequest = new SearchRequest(); searchRequest.indices(indices); searchRequest.source(source); - searchRequest.allowPartialSearchResults(false); + searchRequest.allowPartialSearchResults(allowPartialSearchResults); searchRequest.indicesOptions( includeFrozen ? 
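The gate in searchLogListener generalizes to any SearchResponse listener: with partial results disabled, the first shard failure is promoted to a request-level failure; otherwise the response passes through and the failures travel with it. A sketch under a hypothetical helper name:

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.ShardSearchFailure;
import org.elasticsearch.common.util.CollectionUtils;
import org.elasticsearch.xpack.eql.EqlIllegalArgumentException;

// Mirrors the gating rule above: fail fast on the first shard failure only
// when partial results are off; otherwise hand the response through intact.
static ActionListener<SearchResponse> failFast(ActionListener<SearchResponse> listener, boolean allowPartial) {
    return listener.delegateFailureAndWrap((delegate, response) -> {
        ShardSearchFailure[] failures = response.getShardFailures();
        if (allowPartial == false && CollectionUtils.isEmpty(failures) == false) {
            delegate.onFailure(new EqlIllegalArgumentException(failures[0].reason(), failures[0].getCause()));
        } else {
            delegate.onResponse(response);
        }
    });
}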
IndexResolver.FIELD_CAPS_FROZEN_INDICES_OPTIONS : IndexResolver.FIELD_CAPS_INDICES_OPTIONS ); diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sequence/SequencePayload.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sequence/SequencePayload.java index 45083babddbb4..b4a8edc79b3ad 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sequence/SequencePayload.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sequence/SequencePayload.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.eql.execution.sequence; +import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.core.TimeValue; import org.elasticsearch.search.SearchHit; import org.elasticsearch.xpack.eql.action.EqlSearchResponse.Event; @@ -19,8 +20,14 @@ class SequencePayload extends AbstractPayload { private final List values; - SequencePayload(List sequences, List> docs, boolean timedOut, TimeValue timeTook) { - super(timedOut, timeTook); + SequencePayload( + List sequences, + List> docs, + boolean timedOut, + TimeValue timeTook, + ShardSearchFailure[] shardFailures + ) { + super(timedOut, timeTook, shardFailures); values = new ArrayList<>(sequences.size()); for (int i = 0; i < sequences.size(); i++) { diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sequence/TumblingWindow.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sequence/TumblingWindow.java index eabf6df518ad4..fac8788db0f95 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sequence/TumblingWindow.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sequence/TumblingWindow.java @@ -13,6 +13,7 @@ import org.elasticsearch.action.search.MultiSearchResponse; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.common.Strings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; @@ -41,6 +42,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.HashMap; import java.util.Iterator; import java.util.LinkedHashMap; import java.util.List; @@ -51,6 +53,7 @@ import static org.elasticsearch.action.ActionListener.runAfter; import static org.elasticsearch.xpack.eql.execution.ExecutionUtils.copySource; import static org.elasticsearch.xpack.eql.execution.search.RuntimeUtils.combineFilters; +import static org.elasticsearch.xpack.eql.util.SearchHitUtils.addShardFailures; import static org.elasticsearch.xpack.eql.util.SearchHitUtils.qualifiedIndex; /** @@ -103,6 +106,9 @@ protected boolean removeEldestEntry(Map.Entry eldest) { private final boolean hasKeys; private final List> listOfKeys; + private final boolean allowPartialSearchResults; + private final boolean allowPartialSequenceResults; + private Map shardFailures = new HashMap<>(); // flag used for DESC sequences to indicate whether // the window needs to restart (since the DESC query still has results) @@ -127,7 +133,10 @@ public TumblingWindow( List criteria, SequenceCriterion until, SequenceMatcher matcher, - List> listOfKeys + List> listOfKeys, + boolean allowPartialSearchResults, + boolean allowPartialSequenceResults + ) { this.client = client; @@ -141,6 +150,8 @@ public TumblingWindow( this.hasKeys = baseRequest.keySize() > 0; 
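Every EQL search is built through RuntimeUtils.prepareRequest, so its new parameter is the single switch for the shard-failure policy; previously the value was hardwired to false. Reduced to the stock search API, it amounts to the following (index name hypothetical). Note that TumblingWindow threads two distinct flags: allowPartialSearchResults governs individual shard-level searches, while allowPartialSequenceResults decides whether sequences assembled from an incomplete shard set may be returned at all.

import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.search.builder.SearchSourceBuilder;

// false (the historical behaviour): any shard failure fails the request;
// true: the response may carry partial hits plus its shard failures.
SearchRequest request = new SearchRequest()
    .indices("my-index") // hypothetical
    .source(new SearchSourceBuilder().size(10))
    .allowPartialSearchResults(allowPartialSearchResults);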
this.restartWindowFromTailQuery = baseRequest.descending(); this.listOfKeys = listOfKeys; + this.allowPartialSearchResults = allowPartialSearchResults; + this.allowPartialSequenceResults = allowPartialSequenceResults; } @Override @@ -158,6 +169,10 @@ public void execute(ActionListener listener) { * Move the window while preserving the same base. */ private void tumbleWindow(int currentStage, ActionListener listener) { + if (allowPartialSequenceResults == false && shardFailures.isEmpty() == false) { + doPayload(listener); + return; + } if (currentStage > matcher.firstPositiveStage && matcher.hasCandidates() == false) { if (restartWindowFromTailQuery) { currentStage = matcher.firstPositiveStage; @@ -224,6 +238,9 @@ public void checkMissingEvents(Runnable next, ActionListener listener) private void doCheckMissingEvents(List batchToCheck, MultiSearchResponse p, ActionListener listener, Runnable next) { MultiSearchResponse.Item[] responses = p.getResponses(); + for (MultiSearchResponse.Item response : responses) { + addShardFailures(shardFailures, response.getResponse()); + } int nextResponse = 0; for (Sequence sequence : batchToCheck) { boolean leading = true; @@ -316,7 +333,14 @@ private List prepareQueryForMissingEvents(List toCheck) } addKeyFilter(i, sequence, builder); RuntimeUtils.combineFilters(builder, range); - result.add(RuntimeUtils.prepareRequest(builder.size(1).trackTotalHits(false), false, Strings.EMPTY_ARRAY)); + result.add( + RuntimeUtils.prepareRequest( + builder.size(1).trackTotalHits(false), + false, + allowPartialSearchResults, + Strings.EMPTY_ARRAY + ) + ); } else { leading = false; } @@ -361,6 +385,7 @@ private void advance(int stage, ActionListener listener) { * Execute the base query. */ private void baseCriterion(int baseStage, SearchResponse r, ActionListener listener) { + addShardFailures(shardFailures, r); SequenceCriterion base = criteria.get(baseStage); SearchHits hits = r.getHits(); @@ -731,8 +756,10 @@ private void doPayload(ActionListener listener) { log.trace("Sending payload for [{}] sequences", completed.size()); - if (completed.isEmpty()) { - listener.onResponse(new EmptyPayload(Type.SEQUENCE, timeTook())); + if (completed.isEmpty() || (allowPartialSequenceResults == false && shardFailures.isEmpty() == false)) { + listener.onResponse( + new EmptyPayload(Type.SEQUENCE, timeTook(), shardFailures.values().toArray(new ShardSearchFailure[shardFailures.size()])) + ); return; } @@ -741,7 +768,13 @@ private void doPayload(ActionListener listener) { if (criteria.get(matcher.firstPositiveStage).descending()) { Collections.reverse(completed); } - return new SequencePayload( + completed, + addMissingEventPlaceholders(listOfHits), + false, + timeTook(), + shardFailures.values().toArray(new ShardSearchFailure[0]) + ); })); } diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/plugin/EqlPlugin.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/plugin/EqlPlugin.java index 084a5e74a47e8..210f88c991539 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/plugin/EqlPlugin.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/plugin/EqlPlugin.java @@ -60,6 +60,20 @@ public class EqlPlugin extends Plugin implements ActionPlugin, CircuitBreakerPlu Setting.Property.DeprecatedWarning ); + public static final Setting DEFAULT_ALLOW_PARTIAL_SEARCH_RESULTS = Setting.boolSetting( + "xpack.eql.default_allow_partial_results", +
false, + Setting.Property.NodeScope, + Setting.Property.Dynamic + ); + + public static final Setting DEFAULT_ALLOW_PARTIAL_SEQUENCE_RESULTS = Setting.boolSetting( + "xpack.eql.default_allow_partial_sequence_results", + false, + Setting.Property.NodeScope, + Setting.Property.Dynamic + ); + public EqlPlugin() {} @Override @@ -86,7 +100,7 @@ private Collection createComponents(Client client, Settings settings, Cl */ @Override public List> getSettings() { - return List.of(EQL_ENABLED_SETTING); + return List.of(EQL_ENABLED_SETTING, DEFAULT_ALLOW_PARTIAL_SEARCH_RESULTS, DEFAULT_ALLOW_PARTIAL_SEQUENCE_RESULTS); } @Override diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/plugin/RestEqlSearchAction.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/plugin/RestEqlSearchAction.java index e24a4749f45cd..65def24563e5e 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/plugin/RestEqlSearchAction.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/plugin/RestEqlSearchAction.java @@ -64,6 +64,12 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli } eqlRequest.keepOnCompletion(request.paramAsBoolean("keep_on_completion", eqlRequest.keepOnCompletion())); eqlRequest.ccsMinimizeRoundtrips(request.paramAsBoolean("ccs_minimize_roundtrips", eqlRequest.ccsMinimizeRoundtrips())); + eqlRequest.allowPartialSearchResults( + request.paramAsBoolean("allow_partial_search_results", eqlRequest.allowPartialSearchResults()) + ); + eqlRequest.allowPartialSequenceResults( + request.paramAsBoolean("allow_partial_sequence_results", eqlRequest.allowPartialSequenceResults()) + ); } return channel -> { diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/plugin/TransportEqlSearchAction.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/plugin/TransportEqlSearchAction.java index c0141da2432ce..582352722fc58 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/plugin/TransportEqlSearchAction.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/plugin/TransportEqlSearchAction.java @@ -10,6 +10,7 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListenerResponseHandler; +import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.client.internal.Client; @@ -144,7 +145,8 @@ public EqlSearchResponse initialResponse(EqlSearchTask task) { false, task.getExecutionId().getEncoded(), true, - true + true, + ShardSearchFailure.EMPTY_ARRAY ); } @@ -231,6 +233,12 @@ public static void operation( request.indicesOptions(), request.fetchSize(), request.maxSamplesPerKey(), + request.allowPartialSearchResults() == null + ? defaultAllowPartialSearchResults(clusterService) + : request.allowPartialSearchResults(), + request.allowPartialSequenceResults() == null + ? 
defaultAllowPartialSequenceResults(clusterService) + : request.allowPartialSequenceResults(), clientId, new TaskId(nodeId, task.getId()), task @@ -244,12 +252,34 @@ public static void operation( } } + private static boolean defaultAllowPartialSearchResults(ClusterService clusterService) { + if (clusterService.getClusterSettings() == null) { + return EqlPlugin.DEFAULT_ALLOW_PARTIAL_SEARCH_RESULTS.getDefault(Settings.EMPTY); + } + return clusterService.getClusterSettings().get(EqlPlugin.DEFAULT_ALLOW_PARTIAL_SEARCH_RESULTS); + } + + private static boolean defaultAllowPartialSequenceResults(ClusterService clusterService) { + if (clusterService.getClusterSettings() == null) { + return EqlPlugin.DEFAULT_ALLOW_PARTIAL_SEQUENCE_RESULTS.getDefault(Settings.EMPTY); + } + return clusterService.getClusterSettings().get(EqlPlugin.DEFAULT_ALLOW_PARTIAL_SEQUENCE_RESULTS); + } + static EqlSearchResponse createResponse(Results results, AsyncExecutionId id) { EqlSearchResponse.Hits hits = new EqlSearchResponse.Hits(results.events(), results.sequences(), results.totalHits()); if (id != null) { - return new EqlSearchResponse(hits, results.tookTime().getMillis(), results.timedOut(), id.getEncoded(), false, false); + return new EqlSearchResponse( + hits, + results.tookTime().getMillis(), + results.timedOut(), + id.getEncoded(), + false, + false, + results.shardFailures() + ); } else { - return new EqlSearchResponse(hits, results.tookTime().getMillis(), results.timedOut()); + return new EqlSearchResponse(hits, results.tookTime().getMillis(), results.timedOut(), results.shardFailures()); } } diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/session/EmptyPayload.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/session/EmptyPayload.java index 9822285465087..33ed5799cd073 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/session/EmptyPayload.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/session/EmptyPayload.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.eql.session; +import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.core.TimeValue; import java.util.List; @@ -17,14 +18,16 @@ public class EmptyPayload implements Payload { private final Type type; private final TimeValue timeTook; + private final ShardSearchFailure[] shardFailures; public EmptyPayload(Type type) { - this(type, TimeValue.ZERO); + this(type, TimeValue.ZERO, ShardSearchFailure.EMPTY_ARRAY); } - public EmptyPayload(Type type, TimeValue timeTook) { + public EmptyPayload(Type type, TimeValue timeTook, ShardSearchFailure[] shardFailures) { this.type = type; this.timeTook = timeTook; + this.shardFailures = shardFailures; } @Override @@ -46,4 +49,10 @@ public TimeValue timeTook() { public List values() { return emptyList(); } + + @Override + public ShardSearchFailure[] shardFailures() { + return shardFailures; + } + } diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/session/EqlConfiguration.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/session/EqlConfiguration.java index 8dd8220fb63bc..8242b0b533ad3 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/session/EqlConfiguration.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/session/EqlConfiguration.java @@ -30,6 +30,8 @@ public class EqlConfiguration extends org.elasticsearch.xpack.ql.session.Configu private final EqlSearchTask task; private final int fetchSize; private final int maxSamplesPerKey; 
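The two defaultAllowPartial* helpers above give each flag a three-level precedence: an explicit request parameter wins, else the node's dynamic cluster setting applies, else the setting's compiled-in default (the null check on getClusterSettings() covers unit tests that run without cluster settings). Condensed into one hypothetical resolver:

import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.xpack.eql.plugin.EqlPlugin;

// Precedence: request flag > cluster setting > hard default (false).
private static boolean resolveAllowPartial(Boolean requested, ClusterService clusterService) {
    if (requested != null) {
        return requested; // explicitly set on the request
    }
    if (clusterService.getClusterSettings() == null) {
        // e.g. in unit tests: fall back to the setting's own default
        return EqlPlugin.DEFAULT_ALLOW_PARTIAL_SEARCH_RESULTS.getDefault(Settings.EMPTY);
    }
    return clusterService.getClusterSettings().get(EqlPlugin.DEFAULT_ALLOW_PARTIAL_SEARCH_RESULTS);
}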
+ private final boolean allowPartialSearchResults; + private final boolean allowPartialSequenceResults; @Nullable private final QueryBuilder filter; @@ -50,6 +52,8 @@ public EqlConfiguration( IndicesOptions indicesOptions, int fetchSize, int maxSamplesPerKey, + boolean allowPartialSearchResults, + boolean allowPartialSequenceResults, String clientId, TaskId taskId, EqlSearchTask task @@ -67,6 +71,8 @@ public EqlConfiguration( this.task = task; this.fetchSize = fetchSize; this.maxSamplesPerKey = maxSamplesPerKey; + this.allowPartialSearchResults = allowPartialSearchResults; + this.allowPartialSequenceResults = allowPartialSequenceResults; } public String[] indices() { @@ -89,6 +95,14 @@ public int maxSamplesPerKey() { return maxSamplesPerKey; } + public boolean allowPartialSearchResults() { + return allowPartialSearchResults; + } + + public boolean allowPartialSequenceResults() { + return allowPartialSequenceResults; + } + public QueryBuilder filter() { return filter; } diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/session/Payload.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/session/Payload.java index 1d82478e6db26..05e614714a5aa 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/session/Payload.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/session/Payload.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.eql.session; +import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.core.TimeValue; import java.util.List; @@ -29,4 +30,6 @@ enum Type { TimeValue timeTook(); List values(); + + ShardSearchFailure[] shardFailures(); } diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/session/Results.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/session/Results.java index bb76c08c801cb..13886470f21f5 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/session/Results.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/session/Results.java @@ -9,6 +9,7 @@ import org.apache.lucene.search.TotalHits; import org.apache.lucene.search.TotalHits.Relation; +import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.core.TimeValue; import org.elasticsearch.xpack.eql.action.EqlSearchResponse.Event; import org.elasticsearch.xpack.eql.action.EqlSearchResponse.Sequence; @@ -23,18 +24,28 @@ public class Results { private final boolean timedOut; private final TimeValue tookTime; private final Type type; + private ShardSearchFailure[] shardFailures; public static Results fromPayload(Payload payload) { List values = payload.values(); - return new Results(new TotalHits(values.size(), Relation.EQUAL_TO), payload.timeTook(), false, values, payload.resultType()); + payload.shardFailures(); + return new Results( + new TotalHits(values.size(), Relation.EQUAL_TO), + payload.timeTook(), + false, + values, + payload.resultType(), + payload.shardFailures() + ); } - Results(TotalHits totalHits, TimeValue tookTime, boolean timedOut, List results, Type type) { + Results(TotalHits totalHits, TimeValue tookTime, boolean timedOut, List results, Type type, ShardSearchFailure[] shardFailures) { this.totalHits = totalHits; this.tookTime = tookTime; this.timedOut = timedOut; this.results = results; this.type = type; + this.shardFailures = shardFailures; } public TotalHits totalHits() { @@ -51,6 +62,10 @@ public List sequences() { return (type == Type.SEQUENCE || type == Type.SAMPLE) ? 
(List) results : null; } + public ShardSearchFailure[] shardFailures() { + return shardFailures; + } + public TimeValue tookTime() { return tookTime; } diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/util/SearchHitUtils.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/util/SearchHitUtils.java index 91795ac15b53e..2b5ec9718cfc4 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/util/SearchHitUtils.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/util/SearchHitUtils.java @@ -7,8 +7,12 @@ package org.elasticsearch.xpack.eql.util; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.search.SearchHit; +import java.util.Map; + import static org.elasticsearch.transport.RemoteClusterAware.buildRemoteIndexName; public final class SearchHitUtils { @@ -16,4 +20,12 @@ public final class SearchHitUtils { public static String qualifiedIndex(SearchHit hit) { return buildRemoteIndexName(hit.getClusterAlias(), hit.getIndex()); } + + public static void addShardFailures(Map shardFailures, SearchResponse r) { + if (r.getShardFailures() != null) { + for (ShardSearchFailure shardFailure : r.getShardFailures()) { + shardFailures.put(shardFailure.toString(), shardFailure); + } + } + } } diff --git a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/EqlTestUtils.java b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/EqlTestUtils.java index a1aa8e4bd98d7..75884fab4dbb3 100644 --- a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/EqlTestUtils.java +++ b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/EqlTestUtils.java @@ -51,6 +51,8 @@ private EqlTestUtils() {} null, 123, 1, + false, + true, "", new TaskId("test", 123), null @@ -69,6 +71,8 @@ public static EqlConfiguration randomConfiguration() { randomIndicesOptions(), randomIntBetween(1, 1000), randomIntBetween(1, 1000), + randomBoolean(), + randomBoolean(), randomAlphaOfLength(16), new TaskId(randomAlphaOfLength(10), randomNonNegativeLong()), randomTask() diff --git a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/action/EqlSearchRequestTests.java b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/action/EqlSearchRequestTests.java index 0ff9fa9131b27..1a06aead910c8 100644 --- a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/action/EqlSearchRequestTests.java +++ b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/action/EqlSearchRequestTests.java @@ -80,6 +80,8 @@ protected EqlSearchRequest createTestInstance() { .waitForCompletionTimeout(randomTimeValue()) .keepAlive(randomTimeValue()) .keepOnCompletion(randomBoolean()) + .allowPartialSearchResults(randomBoolean()) + .allowPartialSequenceResults(randomBoolean()) .fetchFields(randomFetchFields) .runtimeMappings(randomRuntimeMappings()) .resultPosition(randomFrom("tail", "head")) @@ -136,6 +138,12 @@ protected EqlSearchRequest mutateInstanceForVersion(EqlSearchRequest instance, T mutatedInstance.runtimeMappings(version.onOrAfter(TransportVersions.V_7_13_0) ? instance.runtimeMappings() : emptyMap()); mutatedInstance.resultPosition(version.onOrAfter(TransportVersions.V_7_17_8) ? instance.resultPosition() : "tail"); mutatedInstance.maxSamplesPerKey(version.onOrAfter(TransportVersions.V_8_7_0) ? instance.maxSamplesPerKey() : 1); + mutatedInstance.allowPartialSearchResults( + version.onOrAfter(TransportVersions.EQL_ALLOW_PARTIAL_SEARCH_RESULTS) ? 
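The mutateInstanceForVersion change that follows encodes the wire rule for the new request flags: on streams older than the EQL_ALLOW_PARTIAL_SEARCH_RESULTS transport version they are not serialized, and readers must behave as if both were false. A plausible writeTo fragment under that assumption (the exact field encoding in EqlSearchRequest may differ; what is fixed is the version gate and the false fallback):

// Hedged sketch only: assumes the flags are nullable Booleans on the request.
if (out.getTransportVersion().onOrAfter(TransportVersions.EQL_ALLOW_PARTIAL_SEARCH_RESULTS)) {
    out.writeOptionalBoolean(allowPartialSearchResults);
    out.writeOptionalBoolean(allowPartialSequenceResults);
}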
instance.allowPartialSearchResults() : false + ); + mutatedInstance.allowPartialSequenceResults( + version.onOrAfter(TransportVersions.EQL_ALLOW_PARTIAL_SEARCH_RESULTS) ? instance.allowPartialSequenceResults() : false + ); return mutatedInstance; } diff --git a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/action/EqlSearchResponseTests.java b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/action/EqlSearchResponseTests.java index 6cb283d11848e..fa118a5256df1 100644 --- a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/action/EqlSearchResponseTests.java +++ b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/action/EqlSearchResponseTests.java @@ -9,6 +9,7 @@ import org.apache.lucene.search.TotalHits; import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; +import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.document.DocumentField; @@ -190,7 +191,7 @@ public static EqlSearchResponse createRandomEventsResponse(TotalHits totalHits, hits = new EqlSearchResponse.Hits(randomEvents(xType), null, totalHits); } if (randomBoolean()) { - return new EqlSearchResponse(hits, randomIntBetween(0, 1001), randomBoolean()); + return new EqlSearchResponse(hits, randomIntBetween(0, 1001), randomBoolean(), ShardSearchFailure.EMPTY_ARRAY); } else { return new EqlSearchResponse( hits, @@ -198,7 +199,8 @@ public static EqlSearchResponse createRandomEventsResponse(TotalHits totalHits, randomBoolean(), randomAlphaOfLength(10), randomBoolean(), - randomBoolean() + randomBoolean(), + ShardSearchFailure.EMPTY_ARRAY ); } } @@ -222,7 +224,7 @@ public static EqlSearchResponse createRandomSequencesResponse(TotalHits totalHit hits = new EqlSearchResponse.Hits(null, seq, totalHits); } if (randomBoolean()) { - return new EqlSearchResponse(hits, randomIntBetween(0, 1001), randomBoolean()); + return new EqlSearchResponse(hits, randomIntBetween(0, 1001), randomBoolean(), ShardSearchFailure.EMPTY_ARRAY); } else { return new EqlSearchResponse( hits, @@ -230,7 +232,8 @@ public static EqlSearchResponse createRandomSequencesResponse(TotalHits totalHit randomBoolean(), randomAlphaOfLength(10), randomBoolean(), - randomBoolean() + randomBoolean(), + ShardSearchFailure.EMPTY_ARRAY ); } } @@ -273,7 +276,8 @@ protected EqlSearchResponse mutateInstanceForVersion(EqlSearchResponse instance, instance.isTimeout(), instance.id(), instance.isRunning(), - instance.isPartial() + instance.isPartial(), + ShardSearchFailure.EMPTY_ARRAY ); } diff --git a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/action/LocalStateEQLXPackPlugin.java b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/action/LocalStateEQLXPackPlugin.java index 4d5201f544d72..33573b99546fb 100644 --- a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/action/LocalStateEQLXPackPlugin.java +++ b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/action/LocalStateEQLXPackPlugin.java @@ -7,26 +7,41 @@ package org.elasticsearch.xpack.eql.action; +import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.indices.breaker.BreakerSettings; import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.plugins.CircuitBreakerPlugin; import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; import 
org.elasticsearch.xpack.eql.plugin.EqlPlugin; import org.elasticsearch.xpack.ql.plugin.QlPlugin; import java.nio.file.Path; -public class LocalStateEQLXPackPlugin extends LocalStateCompositeXPackPlugin { +public class LocalStateEQLXPackPlugin extends LocalStateCompositeXPackPlugin implements CircuitBreakerPlugin { + + private final EqlPlugin eqlPlugin; public LocalStateEQLXPackPlugin(final Settings settings, final Path configPath) { super(settings, configPath); LocalStateEQLXPackPlugin thisVar = this; - plugins.add(new EqlPlugin() { + this.eqlPlugin = new EqlPlugin() { @Override protected XPackLicenseState getLicenseState() { return thisVar.getLicenseState(); } - }); + }; + plugins.add(eqlPlugin); plugins.add(new QlPlugin()); } + @Override + public BreakerSettings getCircuitBreaker(Settings settings) { + return eqlPlugin.getCircuitBreaker(settings); + } + + @Override + public void setCircuitBreaker(CircuitBreaker circuitBreaker) { + eqlPlugin.setCircuitBreaker(circuitBreaker); + } } diff --git a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/assembler/ImplicitTiebreakerTests.java b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/assembler/ImplicitTiebreakerTests.java index 7bb6a228f6e48..abd928b04a9c7 100644 --- a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/assembler/ImplicitTiebreakerTests.java +++ b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/assembler/ImplicitTiebreakerTests.java @@ -141,7 +141,15 @@ public void testImplicitTiebreakerBeingSet() { booleanArrayOf(stages, false), NOOP_CIRCUIT_BREAKER ); - TumblingWindow window = new TumblingWindow(client, criteria, null, matcher, Collections.emptyList()); + TumblingWindow window = new TumblingWindow( + client, + criteria, + null, + matcher, + Collections.emptyList(), + randomBoolean(), + randomBoolean() + ); window.execute(wrap(p -> {}, ex -> { throw ExceptionsHelper.convertToRuntime(ex); })); } } diff --git a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/assembler/SequenceSpecTests.java b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/assembler/SequenceSpecTests.java index a8ed842e94c44..f6aa851b2fff0 100644 --- a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/assembler/SequenceSpecTests.java +++ b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/assembler/SequenceSpecTests.java @@ -277,7 +277,15 @@ public void test() throws Exception { ); QueryClient testClient = new TestQueryClient(); - TumblingWindow window = new TumblingWindow(testClient, criteria, null, matcher, Collections.emptyList()); + TumblingWindow window = new TumblingWindow( + testClient, + criteria, + null, + matcher, + Collections.emptyList(), + randomBoolean(), + randomBoolean() + ); // finally make the assertion at the end of the listener window.execute(ActionTestUtils.assertNoFailureListener(this::checkResults)); diff --git a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sample/CircuitBreakerTests.java b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sample/CircuitBreakerTests.java index dc132659417ff..80b1ff97b725d 100644 --- a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sample/CircuitBreakerTests.java +++ b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sample/CircuitBreakerTests.java @@ -89,7 +89,7 @@ public void query(QueryRequest r, ActionListener l) {} @Override public void 
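LocalStateEQLXPackPlugin has to re-expose the wrapped EqlPlugin's breaker because the composite plugin, not EqlPlugin itself, is what the test node registers, and each breaker name may be registered only once; without the delegation, setCircuitBreaker would never reach EqlPlugin and the sample/sequence memory accounting under test would be a no-op. For reference, a plugin-side breaker registration looks roughly like this (name, limit and durability are illustrative placeholders, not EQL's actual values):

import org.elasticsearch.common.breaker.CircuitBreaker;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.indices.breaker.BreakerSettings;

// Illustrative only: the node calls this at startup, then hands the live
// breaker instance back through setCircuitBreaker for the plugin to use.
@Override
public BreakerSettings getCircuitBreaker(Settings settings) {
    return BreakerSettings.updateFromSettings(
        new BreakerSettings("eql_sequence", ByteSizeValue.ofMb(50).getBytes(), 1.0d,
            CircuitBreaker.Type.MEMORY, CircuitBreaker.Durability.TRANSIENT),
        settings
    );
}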
fetchHits(Iterable> refs, ActionListener>> listener) {} - }, mockCriteria(), randomIntBetween(10, 500), new Limit(1000, 0), CIRCUIT_BREAKER, 1); + }, mockCriteria(), randomIntBetween(10, 500), new Limit(1000, 0), CIRCUIT_BREAKER, 1, randomBoolean()); CIRCUIT_BREAKER.startBreaking(); iterator.pushToStack(new SampleIterator.Page(CB_STACK_SIZE_PRECISION - 1)); @@ -142,7 +142,8 @@ public void fetchHits(Iterable> refs, ActionListener> refs, ActionListener { // do nothing, we don't care about the query results }, ex -> { fail("Shouldn't have failed"); })); diff --git a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sequence/CircuitBreakerTests.java b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sequence/CircuitBreakerTests.java index fe1fca45364e3..58448d981fcca 100644 --- a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sequence/CircuitBreakerTests.java +++ b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sequence/CircuitBreakerTests.java @@ -146,7 +146,15 @@ public void testCircuitBreakerTumblingWindow() { booleanArrayOf(stages, false), CIRCUIT_BREAKER ); - TumblingWindow window = new TumblingWindow(client, criteria, null, matcher, Collections.emptyList()); + TumblingWindow window = new TumblingWindow( + client, + criteria, + null, + matcher, + Collections.emptyList(), + randomBoolean(), + randomBoolean() + ); window.execute(ActionTestUtils.assertNoFailureListener(p -> {})); CIRCUIT_BREAKER.startBreaking(); @@ -228,7 +236,15 @@ private void assertMemoryCleared( booleanArrayOf(sequenceFiltersCount, false), eqlCircuitBreaker ); - TumblingWindow window = new TumblingWindow(eqlClient, criteria, null, matcher, Collections.emptyList()); + TumblingWindow window = new TumblingWindow( + eqlClient, + criteria, + null, + matcher, + Collections.emptyList(), + randomBoolean(), + randomBoolean() + ); window.execute(ActionListener.noop()); assertTrue(esClient.searchRequestsRemainingCount() == 0); // ensure all the search requests have been asked for @@ -271,7 +287,15 @@ public void testEqlCBCleanedUp_on_ParentCBBreak() { booleanArrayOf(sequenceFiltersCount, false), eqlCircuitBreaker ); - TumblingWindow window = new TumblingWindow(eqlClient, criteria, null, matcher, Collections.emptyList()); + TumblingWindow window = new TumblingWindow( + eqlClient, + criteria, + null, + matcher, + Collections.emptyList(), + randomBoolean(), + randomBoolean() + ); window.execute(wrap(p -> fail(), ex -> assertTrue(ex instanceof CircuitBreakingException))); } assertCriticalWarnings("[indices.breaker.total.limit] setting of [0%] is below the recommended minimum of 50.0% of the heap"); @@ -329,6 +353,8 @@ private QueryClient buildQueryClient(ESMockClient esClient, CircuitBreaker eqlCi null, 123, 1, + randomBoolean(), + randomBoolean(), "", new TaskId("test", 123), new EqlSearchTask( diff --git a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sequence/PITFailureTests.java b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sequence/PITFailureTests.java index 1a2f00463b49b..2eee6a262e73c 100644 --- a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sequence/PITFailureTests.java +++ b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sequence/PITFailureTests.java @@ -83,6 +83,8 @@ public void testHandlingPitFailure() { null, 123, 1, + randomBoolean(), + randomBoolean(), "", new TaskId("test", 123), new EqlSearchTask( @@ -132,7 +134,15 @@ public void 
testHandlingPitFailure() { ); SequenceMatcher matcher = new SequenceMatcher(1, false, TimeValue.MINUS_ONE, null, booleanArrayOf(1, false), cb); - TumblingWindow window = new TumblingWindow(eqlClient, criteria, null, matcher, Collections.emptyList()); + TumblingWindow window = new TumblingWindow( + eqlClient, + criteria, + null, + matcher, + Collections.emptyList(), + randomBoolean(), + randomBoolean() + ); window.execute( wrap( p -> { fail("Search succeeded despite PIT failure"); }, diff --git a/x-pack/plugin/esql/compute/ann/src/main/java/org/elasticsearch/compute/ann/Fixed.java b/x-pack/plugin/esql/compute/ann/src/main/java/org/elasticsearch/compute/ann/Fixed.java index 62703fa400ff7..1f10abf3b9fb0 100644 --- a/x-pack/plugin/esql/compute/ann/src/main/java/org/elasticsearch/compute/ann/Fixed.java +++ b/x-pack/plugin/esql/compute/ann/src/main/java/org/elasticsearch/compute/ann/Fixed.java @@ -11,7 +11,6 @@ import java.lang.annotation.Retention; import java.lang.annotation.RetentionPolicy; import java.lang.annotation.Target; -import java.util.function.Function; /** * Used on parameters on methods annotated with {@link Evaluator} to indicate @@ -27,12 +26,23 @@ boolean includeInToString() default true; /** - * Should the Evaluator's factory build this per evaluator with a - * {@code Function} or just take fixed implementation? - * This is typically set to {@code true} to use the {@link Function} - * to make "scratch" objects which have to be isolated in a single thread. - * This is typically set to {@code false} when the parameter is simply - * immutable and can be shared. + * Defines the scope of the parameter. + * - SINGLETON (default) will build a single instance and share it across all evaluators + * - THREAD_LOCAL will build a new instance for each evaluator thread */ - boolean build() default false; + Scope scope() default Scope.SINGLETON; + + /** + * Defines the parameter scope + */ + enum Scope { + /** + * Should be used for immutable parameters that can be shared across different threads + */ + SINGLETON, + /** + * Should be used for mutable or not thread safe parameters + */ + THREAD_LOCAL, + } } diff --git a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/EvaluatorImplementer.java b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/EvaluatorImplementer.java index 5869eff23a9ab..b4a0cf9127f23 100644 --- a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/EvaluatorImplementer.java +++ b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/EvaluatorImplementer.java @@ -16,6 +16,7 @@ import com.squareup.javapoet.TypeSpec; import org.elasticsearch.compute.ann.Fixed; +import org.elasticsearch.compute.ann.Fixed.Scope; import java.util.ArrayList; import java.util.Arrays; @@ -725,7 +726,7 @@ public String closeInvocation() { } } - private record FixedProcessFunctionArg(TypeName type, String name, boolean includeInToString, boolean build, boolean releasable) + private record FixedProcessFunctionArg(TypeName type, String name, boolean includeInToString, Scope scope, boolean releasable) implements ProcessFunctionArg { @Override @@ -762,12 +763,18 @@ public void implementFactoryCtor(MethodSpec.Builder builder) { } private TypeName factoryFieldType() { - return build ? 
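The new Scope enum on @Fixed replaces the old boolean build flag: SINGLETON shares one immutable instance across evaluators, THREAD_LOCAL builds one per evaluator thread via a Function<DriverContext, T> in the generated factory. A hypothetical process function showing the scratch-object case THREAD_LOCAL exists for (the method itself is made up; the annotation usage follows the diff):

import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;
import org.elasticsearch.compute.ann.Evaluator;
import org.elasticsearch.compute.ann.Fixed;
import static org.elasticsearch.compute.ann.Fixed.Scope.THREAD_LOCAL;

// Hypothetical: `scratch` is mutable, so each evaluator thread must get its
// own instance; a SINGLETON scope would share one builder and race.
@Evaluator
static BytesRef process(BytesRef field, @Fixed(scope = THREAD_LOCAL) BytesRefBuilder scratch) {
    scratch.clear();
    scratch.append(field);
    return scratch.toBytesRef();
}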
ParameterizedTypeName.get(ClassName.get(Function.class), DRIVER_CONTEXT, type.box()) : type; + return switch (scope) { + case SINGLETON -> type; + case THREAD_LOCAL -> ParameterizedTypeName.get(ClassName.get(Function.class), DRIVER_CONTEXT, type.box()); + }; } @Override public String factoryInvocation(MethodSpec.Builder factoryMethodBuilder) { - return build ? name + ".apply(context)" : name; + return switch (scope) { + case SINGLETON -> name; + case THREAD_LOCAL -> name + ".apply(context)"; + }; } @Override @@ -1020,7 +1027,7 @@ private ProcessFunction( type, name, fixed.includeInToString(), - fixed.build(), + fixed.scope(), Types.extendsSuper(types, v.asType(), "org.elasticsearch.core.Releasable") ) ); diff --git a/x-pack/plugin/esql/qa/server/mixed-cluster/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/mixed/MixedClusterEsqlSpecIT.java b/x-pack/plugin/esql/qa/server/mixed-cluster/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/mixed/MixedClusterEsqlSpecIT.java index 5efe7ffc800a2..004beaafb4009 100644 --- a/x-pack/plugin/esql/qa/server/mixed-cluster/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/mixed/MixedClusterEsqlSpecIT.java +++ b/x-pack/plugin/esql/qa/server/mixed-cluster/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/mixed/MixedClusterEsqlSpecIT.java @@ -21,7 +21,7 @@ import java.util.List; import static org.elasticsearch.xpack.esql.CsvTestUtils.isEnabled; -import static org.elasticsearch.xpack.esql.action.EsqlCapabilities.Cap.JOIN_LOOKUP_V6; +import static org.elasticsearch.xpack.esql.action.EsqlCapabilities.Cap.JOIN_LOOKUP_V7; import static org.elasticsearch.xpack.esql.qa.rest.EsqlSpecTestCase.Mode.ASYNC; public class MixedClusterEsqlSpecIT extends EsqlSpecTestCase { @@ -96,7 +96,7 @@ protected boolean supportsInferenceTestService() { @Override protected boolean supportsIndexModeLookup() throws IOException { - return hasCapabilities(List.of(JOIN_LOOKUP_V6.capabilityName())); + return hasCapabilities(List.of(JOIN_LOOKUP_V7.capabilityName())); } @Override diff --git a/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClusterSpecIT.java b/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClusterSpecIT.java index dd75776973c3d..c75a920e16973 100644 --- a/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClusterSpecIT.java +++ b/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClusterSpecIT.java @@ -48,7 +48,7 @@ import static org.elasticsearch.xpack.esql.EsqlTestUtils.classpathResources; import static org.elasticsearch.xpack.esql.action.EsqlCapabilities.Cap.INLINESTATS; import static org.elasticsearch.xpack.esql.action.EsqlCapabilities.Cap.INLINESTATS_V2; -import static org.elasticsearch.xpack.esql.action.EsqlCapabilities.Cap.JOIN_LOOKUP_V6; +import static org.elasticsearch.xpack.esql.action.EsqlCapabilities.Cap.JOIN_LOOKUP_V7; import static org.elasticsearch.xpack.esql.action.EsqlCapabilities.Cap.JOIN_PLANNING_V1; import static org.elasticsearch.xpack.esql.action.EsqlCapabilities.Cap.METADATA_FIELDS_REMOTE_TEST; import static org.elasticsearch.xpack.esql.qa.rest.EsqlSpecTestCase.Mode.SYNC; @@ -124,7 +124,7 @@ protected void shouldSkipTest(String testName) throws IOException { assumeFalse("INLINESTATS not yet supported in CCS", testCase.requiredCapabilities.contains(INLINESTATS.capabilityName())); assumeFalse("INLINESTATS not yet supported in 
CCS", testCase.requiredCapabilities.contains(INLINESTATS_V2.capabilityName())); assumeFalse("INLINESTATS not yet supported in CCS", testCase.requiredCapabilities.contains(JOIN_PLANNING_V1.capabilityName())); - assumeFalse("LOOKUP JOIN not yet supported in CCS", testCase.requiredCapabilities.contains(JOIN_LOOKUP_V6.capabilityName())); + assumeFalse("LOOKUP JOIN not yet supported in CCS", testCase.requiredCapabilities.contains(JOIN_LOOKUP_V7.capabilityName())); } private TestFeatureService remoteFeaturesService() throws IOException { @@ -283,8 +283,8 @@ protected boolean supportsInferenceTestService() { @Override protected boolean supportsIndexModeLookup() throws IOException { - // CCS does not yet support JOIN_LOOKUP_V6 and clusters falsely report they have this capability - // return hasCapabilities(List.of(JOIN_LOOKUP_V6.capabilityName())); + // CCS does not yet support JOIN_LOOKUP_V7 and clusters falsely report they have this capability + // return hasCapabilities(List.of(JOIN_LOOKUP_V7.capabilityName())); return false; } } diff --git a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RequestIndexFilteringTestCase.java b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RequestIndexFilteringTestCase.java index 2aae4c94c33fe..40027249670f6 100644 --- a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RequestIndexFilteringTestCase.java +++ b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RequestIndexFilteringTestCase.java @@ -221,7 +221,7 @@ public void testIndicesDontExist() throws IOException { assertThat(e.getMessage(), containsString("index_not_found_exception")); assertThat(e.getMessage(), containsString("no such index [foo]")); - if (EsqlCapabilities.Cap.JOIN_LOOKUP_V6.isEnabled()) { + if (EsqlCapabilities.Cap.JOIN_LOOKUP_V7.isEnabled()) { e = expectThrows( ResponseException.class, () -> runEsql(timestampFilter("gte", "2020-01-01").query("FROM test1 | LOOKUP JOIN foo ON id1")) diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java index becd4d8694bbf..48800cf4304cc 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java @@ -104,7 +104,7 @@ public class CsvTestsDataLoader { private static final TestsDataset DISTANCES = new TestsDataset("distances"); private static final TestsDataset K8S = new TestsDataset("k8s", "k8s-mappings.json", "k8s.csv").withSetting("k8s-settings.json"); private static final TestsDataset ADDRESSES = new TestsDataset("addresses"); - private static final TestsDataset BOOKS = new TestsDataset("books"); + private static final TestsDataset BOOKS = new TestsDataset("books").withSetting("books-settings.json"); private static final TestsDataset SEMANTIC_TEXT = new TestsDataset("semantic_text").withInferenceEndpoint(true); public static final Map CSV_DATASET_MAP = Map.ofEntries( diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/books-settings.json b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/books-settings.json new file mode 100644 index 0000000000000..b324c27b40653 --- /dev/null +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/books-settings.json @@ -0,0 +1,5 @@ +{ + "index": { + 
"number_of_shards": 3 + } +} diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/books.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/books.csv index 1deefaa3c6475..1cb01687e6511 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/books.csv +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/data/books.csv @@ -1,80 +1,80 @@ -book_no:keyword,title:text,author:text,year:integer,publisher:text,ratings:float,description:text -2924,A Gentle Creature and Other Stories: White Nights\, A Gentle Creature\, and The Dream of a Ridiculous Man (The World's Classics),[Fyodor Dostoevsky, Alan Myers, W. J. Leatherbarrow],2009,Oxford Paperbacks,4.00,In these stories Dostoevsky explores both the figure of the dreamer divorced from reality and also his own ambiguous attitude to utopianism\, themes central to many of his great novels. This new translation captures the power and lyricism of Dostoevsky's writing\, while the introduction examines the stories in relation to one another and to his novels. -7670,A Middle English Reader and Vocabulary,[Kenneth Sisam, J. R. R. Tolkien],2011,Courier Corporation,4.33,This highly respected anthology of medieval English literature features poetry\, prose and popular tales from Arthurian legend and classical mythology. Includes notes on each extract\, appendices\, and an extensive glossary by J. R. R. Tolkien. -7381,A Psychic in the Heartland: The Extraordinary Experiences of a Small Town Doctor,Bettilu Stein Faulkner,2003,Red Wheel/Weiser,4.50,The true story of a small-town doctor destined to live his life along two paths: one as a successful physician\, the other as a psychic with ever more interesting adventures. Experiencing a wide range of spiritual phenomena\, Dr. Riblet Hout learned about the connection between the healer and the healed\, our individual missions on earth\, free will\, and our relationship with God. He also paints a vivid picture of life on the other side as well as the moment of transition from physical life to afterlife. -2883,A Summer of Faulkner: As I Lay Dying/The Sound and the Fury/Light in August (Oprah's Book Club),William Faulkner,2005,Vintage Books,3.89,Presents three novels\, including As I Lay Dying\, in which the Bundren family journeys across Mississippi to bury their mother\, The Sound and the Fury\, in which Caddy Compson's story is narrated by her three brothers\, and Light in August\, in which th -4023,A Tolkien Compass: Including J. R. R. Tolkien's Guide to the Names in The Lord of the Rings,[Walter Scheps, Agnes Perkins, Charles Adolph Huttar, John Ronald Reuel Tolkien],1975,Open Court Publishing,4.67,The structure\, content\, and character of Tolkien's The Hobbit and The Lord of the Rings are dealt with in ten critical essays. -2382,A Wizard of Earthsea (Earthsea Trilogy Ser.),Ursula K. Le Guin,1991,Atheneum Books for Young Readers,4.01,A boy grows to manhood while attempting to subdue the evil he unleashed on the world as an apprentice to the Master Wizard. -7541,A Writer's Diary (Volume 1: 1873-1876),Fyodor Dostoevsky,1997,Northwestern University Press,4.50,Winner of the AATSEEL Outstanding Translation Award This is the first paperback edition of the complete collection of writings that has been called Dostoevsky's boldest experiment with literary form\, it is a uniquely encyclopedic forum of fictional and nonfictional genres. The Diary's radical format was matched by the extreme range of its contents. 
In a single frame it incorporated an astonishing variety of material: short stories\, humorous sketches\, reports on sensational crimes\, historical predictions\, portraits of famous people\, autobiographical pieces\, and plans for stories\, some of which were never written while others appeared in the Diary itself. -7400,Anna Karenina: Television Tie-In Edition (Signet classics),[Leo Tolstoy, SBP Editors],2019,Samaira Book Publishers,4.45,The Russian novelist and moral philosopher Leo Tolstoy (1828-1910) ranks as one of the world s great writers\, and his 'War and Peace' has been called the greatest novel ever written. But during his long lifetime\, Tolstoy also wrote enough shorter works to fill many volumes. The message in all his stories is presented with such humour that the reader hardly realises that it is strongly didactic. These stories give a snapshot of Russia and its people in the late nineteenth century. -4917,Autumn of the Patriarch,Gabriel Garcia Marquez,2014,Penguin UK,4.33,Gabriel Garcia Marquez\, winner of the 1982 Nobel Prize for Literature and author of One Hundred Years of Solitude\, explores the loneliness of power in Autumn of the Patriarch. 'Over the weekend the vultures got into the presidential palace by pecking through the screens on the balcony windows and the flapping of their wings stirred up the stagnant time inside' As the citizens of an unnamed Caribbean nation creep through dusty corridors in search of their tyrannical leader\, they cannot comprehend that the frail and withered man lying dead on the floor can be the self-styled General of the Universe. Their arrogant\, manically violent leader\, known for serving up traitors to dinner guests and drowning young children at sea\, can surely not die the humiliating death of a mere mortal? Tracing the demands of a man whose egocentric excesses mask the loneliness of isolation and whose lies have become so ingrained that they are indistinguishable from truth\, Marquez has created a fantastical portrait of despotism that rings with an air of reality. 'Delights with its quirky humanity and black humour and impresses by its total originality' Vogue 'Captures perfectly the moral squalor and political paralysis that enshrouds a society awaiting the death of a long-term dictator' Guardian 'Marquez writes in this lyrical\, magical language that no-one else can do' Salman Rushdie -9896,Barn burning (A tale blazer book),William Faulkner,1979,Perfection Learning,3.50,Reprinted from Collected Stories of William Faulkner\, by permission of Random House\, Inc. -9607,Beowolf: The monsters and the critics,John Ronald Reuel Tolkien,1997,HarperCollins UK,4.12,A collection of seven essays by J.R.R. Tolkien arising out of Tolkien's work in medieval literature -1985,Brothers Karamazov,Fyodor Dostoevsky,2015,First Avenue Editions,5.00,Four brothers reunite in their hometown in Russia. The murder of their father forces the brothers to question their beliefs about each other\, religion\, and morality. -2713,Collected Stories of William Faulkner,William Faulkner,1995,Vintage,4.53,A collection of short stories focuses on the people of rural Mississippi -2464,Conversations with Kurt Vonnegut (Literary Conversations),Kurt Vonnegut,1988,Univ. 
Press of Mississippi,4.40,Gathers interviews with Vonnegut from each period of his career and offers a brief profile of his life and accomplishments -8534,Crime and Punishment (Oxford World's Classics),Fyodor Dostoevsky,2017,Oxford University Press,4.38,'One death\, in exchange for thousands of lives - it's simple arithmetic!' A new translation of Dostoevsky's epic masterpiece\, Crime and Punishment (1866). The impoverished student Raskolnikov decides to free himself from debt by killing an old moneylender\, an act he sees as elevating himself above conventional morality. Like Napoleon he will assert his will and his crime will be justified by its elimination of 'vermin' for the sake of the greater good. But Raskolnikov is torn apart by fear\, guilt\, and a growing conscience under the influence of his love for Sonya. Meanwhile the police detective Porfiry is on his trial. It is a powerfully psychological novel\, in which the St Petersburg setting\, Dostoevsky's own circumstances\, and contemporary social problems all play their part. -8605,Dead Souls,Nikolai Gogol,1997,Vintage,4.28,Chichikov\, an amusing and often confused schemer\, buys deceased serfs' names from landholders' poll tax lists hoping to mortgage them for profit -6970,Domestic Goddesses,Edith Vonnegut,1998,Pomegranate,4.67,In this immensely charming and insightful book\, artist Edith Vonnegut takes issue with traditional art imagery in which women are shown as weak and helpless. Through twenty-seven of her own paintings interspersed with her text\, she poignantly -- and humorously -- illustrates her maxim that the lives of mothers and homemakers are filled with endless challenges and vital decisions that should be portrayed with the dignity they deserve. In Vonnegut's paintings\, one woman bravely blocks the sun from harming a child (Sun Block) while another vacuums the stairs with angelic figures singing her praises (Electrolux). In contrasting her own Domestic Goddesses with the diaphanous women of classical art (seven paintings by masters such as Titian and Botticelli are included)\, she 'expresses the importance of traditional roles of women so cleverly and with such joy that her message and images will be forever emblazoned on our collective psyche. -4814,El Coronel No Tiene Quien Le Escriba / No One Writes to the Colonel (Spanish Edition),Gabriel Garcia Marquez,2005,Harper Collins,4.45,Written with compassionate realism and wit\, the stories in this mesmerizing collection depict the disparities of town and village life in South America\, of the frightfully poor and outrageously rich\, of memories and illusions\, and of lost opportunities and present joys. -4636,FINAL WITNESS,Simon Tolkien,2004,Random House Digital\, Inc.,3.94,The murder of Lady Anne Robinson by two intruders causes a schism in the victim's family when her son convinces police that his father's beautiful personal assistant hired the killers\, while his father\, the British minister of defense\, refuses to believe his son and marries the accused. A first novel. Reprint. -2936,Fellowship of the Ring 2ND Edition,John Ronald Reuel Tolkien,2008,HarperCollins UK,4.43,Sauron\, the Dark Lord\, has gathered to him all the Rings of Power - the means by which he intends to rule Middle-earth. All he lacks in his plans for dominion is the One Ring - the ring that rules them all - which has fallen into the hands of the hobbit\, Bilbo Baggins. 
In a sleepy village in the Shire\, young Frodo Baggins finds himself faced with an immense task\, as his elderly cousin Bilbo entrusts the Ring to his care. Frodo must leave his home and make a perilous journey across Middle-earth to the Cracks of Doom\, there to destroy the Ring and foil the Dark Lord in his evil purpose. JRR Tolkien's great work of imaginative fiction has been labelled both a heroic romance and a classic fantasy fiction. By turns comic and homely\, epic and diabolic\, the narrative moves through countless changes of scene and character in an imaginary world which is totally convincing in its detail. -8956,GOD BLESS YOU MR. ROSEWATER : Or Pearls Before Swine,Kurt Vonnegut,1970,New York : Dell,4.00,A lawyer schemes to gain control of a large fortune by having the present claimant declared insane. -6818,Hadji Murad,Leo Tolstoy,2022,Hachette UK,3.88,'How truth thickens and deepens when it migrates from didactic fable to the raw experience of a visceral awakening is one of the thrills of Tolstoy's stories' Sharon Cameron in her preface to Hadji Murad and Other Stories This\, the third volume of Tolstoy's shorter fiction concentrates on his later stories\, including one of his greatest\, 'Hadji Murad'. In the stark form of homily that shapes these later works\, life considered as one's own has no rational meaning. From the chain of events that follows in the wake of two schoolboys' deception in 'The Forged Coupon' to the disillusionment of the narrator in 'After the Ball' we see\, in Virginia Woolf's observation\, that Tolstoy puts at the centre of his writing one 'who gathers into himself all experience\, turns the world round between his fingers\, and never ceases to ask\, even as he enjoys it\, what is the meaning of it'. The riverrun edition reissues the translation of Louise and Aylmer Maude\, whose influential versions of Tolstoy first brought his work to a wide readership in English. -3950,Hocus,Kurt Vonnegut,1997,Penguin,4.67,Tarkington College\, a small\, exclusive college in upstate New York\, is turned upside down when ten thousand prisoners from the maximum security prison across Lake Mohiga break out and head for the college -5404,Intruder in the dust,William Faulkner,2011,Vintage,3.18,A classic Faulkner novel which explores the lives of a family of characters in the South. An aging black who has long refused to adopt the black's traditionally servile attitude is wrongfully accused of murdering a white man. -5578,Intruder in the dust: A novel,William Faulkner,1991,Vintage,3.18,Dramatizes the events that surround the murder of a white man in a volatile Southern community -6380,La hojarasca (Spanish Edition),Gabriel Garcia Marquez,1979,Harper Collins,3.75,Translated from the Spanish by Gregory Rabassa -5335,Letters of J R R Tolkien,J.R.R. Tolkien,2014,HarperCollins,4.70,This collection will entertain all who appreciate the art of masterful letter writing. The Letters of J.R.R Tolkien sheds much light on Tolkien's creative genius and grand design for the creation of a whole new world: Middle-earth. Featuring a radically expanded index\, this volume provides a valuable research tool for all fans wishing to trace the evolution of THE HOBBIT and THE LORD OF THE RINGS. -3870,My First 100 Words in Spanish/English (My First 100 Words Pull-Tab Book),Keith Faulkner,1998,Libros Para Ninos,4.50,Learning a foreign language has never been this much fun! Just pull the sturdy tabs and change the words under the pictures from English to Spanish and back again to English! 
-4502,O'Brian's Bride,Colleen Faulkner,1995,Zebra Books,5.00,Abandoning her pampered English life to marry a man in the American colonies\, Elizabeth finds her new world shattered when her husband is killed in an accident\, leaving her in charge of a business on the untamed frontier. Original. -7635,Oliphaunt (Beastly Verse),J. R. R. Tolkien,1989,Contemporary Books,2.50,A poem in which an elephant describes himself and his way of life. On board pages. -3254,Pearl and Sir Orfeo,[John Ronald Reuel Tolkien, Christopher Tolkien],1995,Harpercollins Pub Limited,5.00,Three epic poems from 14th century England speak of life during the age of chivalry. Translated from medieval English. -3677,Planet of Exile,Ursula K. Le Guin,1979,Orion,4.20,PLAYAWAY: An alliance between the powerful Tevars and the brown-skinned\, clairvoyant Farbons must take place if the two colonies are to withstand the fierce attack of the nomadic tribes from the north of the planet Eltanin. -4289,Poems from the Hobbit,J R R Tolkien,1999,HarperCollins Publishers,4.00,A collection of J.R.R. Tolkien's Hobbit poems in a miniature hardback volume complete with illustrations by Tolkien himself. Far over misty mountains cold To dungeons deep and caverns old We must away ere break of day To seek the pale enchanted gold. J.R.R. Tolkien's acclaimed The Hobbit contains 12 poems which are themselves masterpieces of writing. This miniature book\, illustrated with 30 of Tolkien's own paintings and drawings from the book -- some quite rare and all in full colour -- includes all the poems\, plus Gollum's eight riddles in verse\, and will be a perfect keepsake for lovers of The Hobbit and of accomplished poetry. -6151,Pop! Went Another Balloon: A Magical Counting Storybook (Magical Counting Storybooks),[Keith Faulkner, Rory Tyger],2003,Dutton Childrens Books,5.00,Toby the turtle goes from in-line skates to a motorcycle to a rocketship with a handful of balloons that pop\, one by one\, along the way. -3535,Rainbow's End: A Magical Story and Moneybox,[Keith Faulkner, Beverlie Manson],2003,Barrons Juveniles,4.00,In this combination picture storybook and coin bank\, the unusual front cover shows an illustration from the story that's embellished with five transparent plastic windows. Opening the book\, children will find a story about a poor little ballerina who is crying because her dancing shoes are worn and she has no money to replace them. Full color. Consumable. -8423,Raising Faithful Kids in a Fast-Paced World,Paul Faulkner,1995,Howard Publishing Company,5.00,To find help for struggling parents\, Dr. Paul Faulkner--renowned family counselor and popular speaker--interviewed 30 successful families who have managed to raise faithful kids while also maintaining demanding careers. The invaluable strategies and methods he gleaned are now available in this powerful book delivered in Dr. Faulkner's warm\, humorous style. -1463,Realms of Tolkien: Images of Middle-earth,J. R. R. 
Tolkien,1997,HarperCollins Publishers,4.00,Twenty new and familiar Tolkien artists are represented in this fabulous volume\, breathing an extraordinary variety of life into 58 different scenes\, each of which is accompanied by appropriate passage from The Hobbit and The Lord of the Rings and The Silmarillion -6323,Resurrection (The Penguin classics),Leo Tolstoy,2009,Penguin,3.25,Leo Tolstoy's last completed novel\, Resurrection is an intimate\, psychological tale of guilt\, anger and forgiveness Serving on the jury at a murder trial\, Prince Dmitri Nekhlyudov is devastated when he sees the prisoner - Katyusha\, a young maid he seduced and abandoned years before. As Dmitri faces the consequences of his actions\, he decides to give up his life of wealth and luxury to devote himself to rescuing Katyusha\, even if it means following her into exile in Siberia. But can a man truly find redemption by saving another person? Tolstoy's most controversial novel\, Resurrection (1899) is a scathing indictment of injustice\, corruption and hypocrisy at all levels of society. Creating a vast panorama of Russian life\, from peasants to aristocrats\, bureaucrats to convicts\, it reveals Tolstoy's magnificent storytelling powers. Anthony Briggs' superb new translation preserves Tolstoy's gripping realism and satirical humour. In his introduction\, Briggs discusses the true story behind Resurrection\, Tolstoy's political and religious reasons for writing the novel\, his gift for characterization and the compelling psychological portrait of Dmitri. This edition also includes a chronology\, notes and a summary of chapters. For more than seventy years\, Penguin has been the leading publisher of classic literature in the English-speaking world. With more than 1\,700 titles\, Penguin Classics represents a global bookshelf of the best works throughout history and across genres and disciplines. Readers trust the series to provide authoritative texts enhanced by introductions and notes by distinguished scholars and contemporary authors\, as well as up-to-date translations by award-winning translators. -2714,Return of the King Being the Third Part of The Lord of the Rings,J. R. R. Tolkien,2012,HarperCollins,4.60,Concluding the story begun in The Hobbit\, this is the final part of Tolkien s epic masterpiece\, The Lord of the Rings\, featuring an exclusive cover image from the film\, the definitive text\, and a detailed map of Middle-earth. The armies of the Dark Lord Sauron are massing as his evil shadow spreads ever wider. Men\, Dwarves\, Elves and Ents unite forces to do battle agains the Dark. Meanwhile\, Frodo and Sam struggle further into Mordor in their heroic quest to destroy the One Ring. The devastating conclusion of J.R.R. Tolkien s classic tale of magic and adventure\, begun in The Fellowship of the Ring and The Two Towers\, features the definitive edition of the text and includes the Appendices and a revised Index in full. To celebrate the release of the first of Peter Jackson s two-part film adaptation of The Hobbit\, THE HOBBIT: AN UNEXPECTED JOURNEY\, this third part of The Lord of the Rings is available for a limited time with an exclusive cover image from Peter Jackson s award-winning trilogy. -7350,Return of the Shadow,[John Ronald Reuel Tolkien, Christopher Tolkien],2000,Mariner Books,5.00,In this sixth volume of The History of Middle-earth the story reaches The Lord of the Rings. 
In The Return of the Shadow (an abandoned title for the first volume) Christopher Tolkien describes\, with full citation of the earliest notes\, outline plans\, and narrative drafts\, the intricate evolution of The Fellowship of the Ring and the gradual emergence of the conceptions that transformed what J.R.R. Tolkien for long believed would be a far shorter book\, 'a sequel to The Hobbit'. The enlargement of Bilbo's 'magic ring' into the supremely potent and dangerous Ruling Ring of the Dark Lord is traced and the precise moment is seen when\, in an astonishing and unforeseen leap in the earliest narrative\, a Black Rider first rode into the Shire\, his significance still unknown. The character of the hobbit called Trotter (afterwards Strider or Aragorn) is developed while his indentity remains an absolute puzzle\, and the suspicion only very slowly becomes certainty that he must after all be a Man. The hobbits\, Frodo's companions\, undergo intricate permutations of name and personality\, and other major figures appear in strange modes: a sinister Treebeard\, in league with the Enemy\, a ferocious and malevolent Farmer Maggot. The story in this book ends at the point where J.R.R. Tolkien halted in the story for a long time\, as the Company of the Ring\, still lacking Legolas and Gimli\, stood before the tomb of Balin in the Mines of Moria. The Return of the Shadow is illustrated with reproductions of the first maps and notable pages from the earliest manuscripts. -6760,Roverandom,J. R. R. Tolkien,1999,Mariner Books,4.38,Rover\, a dog who has been turned into a toy dog encounters rival wizards and experiences various adventures on the moon with giant spiders\, dragon moths\, and the Great White Dragon. By the author of The Hobbit. Reprint. -8873,Searoad: Chronicles of Klatsand,Ursula K. Le Guin,2004,Shambhala Publications,5.00,A series of interlinking tales and a novella by the author of the Earthsea trilogy portrays the triumphs and struggles of several generations of women who independently control Klatsand\, a small resort town on the Oregon coast. Reprint. -2378,Selected Letters of Lucretia Coffin Mott (Women in American History),[Lucretia Mott, Holly Byers Ochoa, Carol Faulkner],2002,University of Illinois Press,5.00,Dedicated to reform of almost every kind - temperance\, peace\, equal rights\, woman suffrage\, nonresistance\, and the abolition of slavery - Mott viewed women's rights as only one element of a broad-based reform agenda for American society. -1502,Selected Passages from Correspondence with Friends,Nikolai Vasilevich Gogol,2009,Vanderbilt University Press,4.00,Nikolai Gogol wrote some letters to his friends\, none of which were a nose of high rank. Many are reproduced here (the letters\, not noses). -5996,Smith of Wooten Manor & Farmer Giles of Ham,John Ronald Reuel Tolkien,1969,Del Rey,4.91,Two bewitching fantasies by J.R.R. Tolkien\, beloved author of THE HOBBIT. In SMITH OF WOOTTON MAJOR\, Tolkien explores the gift of fantasy\, and what it means to the life and character of the man who receives it. And FARMER GILES OF HAM tells a delightfully ribald mock-heroic tale\, where a dragon who invades a town refuses to fight\, and a farmer is chosen to slay him. -2301,Smith of Wootton Major & Farmer Giles of Ham,John Ronald Reuel Tolkien,1969,Del Rey,5.00,Two bewitching fantasies by J.R.R. Tolkien\, beloved author of THE HOBBIT. In SMITH OF WOOTTON MAJOR\, Tolkien explores the gift of fantasy\, and what it means to the life and character of the man who receives it. 
And FARMER GILES OF HAM tells a delightfully ribald mock-heroic tale\, where a dragon who invades a town refuses to fight\, and a farmer is chosen to slay him. -2236,Steering the Craft,Ursula K. Le Guin,2015,Houghton Mifflin Harcourt,4.73,A revised and updated guide to the essentials of a writer's craft\, presented by a brilliant practitioner of the art Completely revised and rewritten to address the challenges and opportunities of the modern era\, this handbook is a short\, deceptively simple guide to the craft of writing. Le Guin lays out ten chapters that address the most fundamental components of narrative\, from the sound of language to sentence construction to point of view. Each chapter combines illustrative examples from the global canon with Le Guin's own witty commentary and an exercise that the writer can do solo or in a group. She also offers a comprehensive guide to working in writing groups\, both actual and online. Masterly and concise\, Steering the Craft deserves a place on every writer's shelf. -4724,THE UNVANQUISHED,William Faulkner,2011,Vintage,3.50,Set in Mississippi during the Civil War and Reconstruction\, THE UNVANQUISHED focuses on the Sartoris family\, who\, with their code of personal responsibility and courage\, stand for the best of the Old South's traditions. -5948,That We Are Gentle Creatures,Fyodor Dostoevsky,2009,OUP Oxford,4.33,In the stories in this volume Dostoevsky explores both the figure of the dreamer divorced from reality and also his own ambiguous attitude to utopianism\, themes central to many of his great novels. In White Nights the apparent idyll of the dreamer's romantic fantasies disguises profound loneliness and estrangement from 'living life'. Despite his sentimental friendship with Nastenka\, his final withdrawal into the world of the imagination anticipates the retreat into the 'underground' of many of Dostoevsky's later intellectual heroes. A Gentle Creature and The Dream of a Ridiculous Man show how such withdrawal from reality can end in spiritual desolation and moral indifference and how\, in Dostoevsky's view\, the tragedy of the alienated individual can be resolved only by the rediscovery of a sense of compassion and responsibility towards fellow human beings. This new translation captures the power and lyricism of Dostoevsky's writing\, while the introduction examines the stories in relation to one another and to his novels. ABOUT THE SERIES: For over 100 years Oxford World's Classics has made available the widest range of literature from around the globe. Each affordable volume reflects Oxford's commitment to scholarship\, providing the most accurate text plus a wealth of other valuable features\, including expert introductions by leading authorities\, helpful notes to clarify the text\, up-to-date bibliographies for further study\, and much more. -1937,The Best Short Stories of Dostoevsky (Modern Library),Fyodor Dostoevsky,2012,Modern Library,4.33,This collection\, unique to the Modern Library\, gathers seven of Dostoevsky's key works and shows him to be equally adept at the short story as with the novel. Exploring many of the same themes as in his longer works\, these small masterpieces move from the tender and romantic White Nights\, an archetypal nineteenth-century morality tale of pathos and loss\, to the famous Notes from the Underground\, a story of guilt\, ineffectiveness\, and uncompromising cynicism\, and the first major work of existential literature. 
Among Dostoevsky's prototypical characters is Yemelyan in The Honest Thief\, whose tragedy turns on an inability to resist crime. Presented in chronological order\, in David Magarshack's celebrated translation\, this is the definitive edition of Dostoevsky's best stories. -2776,The Devil and Other Stories (Oxford World's Classics),Leo Tolstoy,2003,OUP Oxford,5.00,'It is impossible to explain why Yevgeny chose Liza Annenskaya\, as it is always impossible to explain why a man chooses this and not that woman.' This collection of eleven stories spans virtually the whole of Tolstoy's creative life. While each is unique in form\, as a group they are representative of his style\, and touch on the central themes that surface in War and Peace and Anna Karenina. Stories as different as 'The Snowstorm'\, 'Lucerne'\, 'The Diary of a Madman'\, and 'The Devil' are grounded in autobiographical experience. They deal with journeys of self-discovery and the moral and religious questioning that characterizes Tolstoy's works of criticism and philosophy. 'Strider' and 'Father Sergy'\, as well as reflecting Tolstoy's own experiences\, also reveal profound psychological insights. These stories range over much of the Russian world of the nineteenth century\, from the nobility to the peasantry\, the military to the clergy\, from merchants and cobblers to a horse and a tree. Together they present a fascinating picture of Tolstoy's skill and artistry. ABOUT THE SERIES: For over 100 years Oxford World's Classics has made available the widest range of literature from around the globe. Each affordable volume reflects Oxford's commitment to scholarship\, providing the most accurate text plus a wealth of other valuable features\, including expert introductions by leading authorities\, helpful notes to clarify the text\, up-to-date bibliographies for further study\, and much more. -4231,The Dispossessed,Ursula K. Le Guin,1974,Harpercollins,4.26,Frequently reissued with the same ISBN\, but with slightly differing bibliographical details. -7480,The Hobbit,J. R. R. Tolkien,2012,Mariner Books,4.64,Celebrating 75 years of one of the world's most treasured classics with an all new trade paperback edition. Repackaged with new cover art. 500\,000 first printing. -6405,The Hobbit or There and Back Again,J. R. R. Tolkien,2012,Mariner Books,4.63,Celebrating 75 years of one of the world's most treasured classics with an all new trade paperback edition. Repackaged with new cover art. 500\,000 first printing. -2540,The Inspector General (Language - Russian) (Russian Edition),[Nicolai Gogol, Thomas Seltzer],2014,CreateSpace,3.50,The Inspector-General is a national institution. To place a purely literary valuation upon it and call it the greatest of Russian comedies would not convey the significance of its position either in Russian literature or in Russian life itself. There is no other single work in the modern literature of any language that carries with it the wealth of associations which the Inspector-General does to the educated Russian. -2951,The Insulted and Injured,Fyodor Dostoevsky,2011,Wm. B. Eerdmans Publishing,4.00,The Insulted and Injured\, which came out in 1861\, was Fyodor Dostoevsky's first major work of fiction after his Siberian exile and the first of the long novels that made him famous. 
Set in nineteenth-century Petersburg\, this gripping novel features a vividly drawn set of characters - including Vanya (Dostoevsky's semi-autobiographical hero)\, Natasha (the woman he loves)\, and Alyosha (Natasha's aristocratic lover) - all suffering from the cruelly selfish machinations of Alyosha's father\, the dark and powerful Prince Valkovsky. Boris Jakim's fresh English-language rendering of this gem in the Doestoevsky canon is both more colorful and more accurate than any earlier translation. --from back cover. -2130,The J. R. R. Tolkien Audio Collection,[John Ronald Reuel Tolkien, Christopher Tolkien],2002,HarperCollins Publishers,4.89,For generations\, J R R Tolkien's words have brought to thrilling life a world of hobbits\, magic\, and historic myth\, woken from its foggy slumber within our minds. Here\, he tells the tales in his own voice. -9801,The Karamazov Brothers (Oxford World's Classics),Fyodor Dostoevsky,2008,Oxford University Press,4.40,A remarkable work showing the author's power to depict Russian character and his understanding of human nature. Driven by intense\, uncontrollable emotions of rage and revenge\, the four Karamazov brothers all become involved in the brutal murder of their despicable father. -5469,The Lays of Beleriand,[John Ronald Reuel Tolkien, Christopher Tolkien],2002,Harpercollins Pub Limited,4.42,The third volume that contains the early myths and legends which led to the writing of Tolkien's epic tale of war\, The Silmarillion. This\, the third volume of The History of Middle-earth\, gives us a priviledged insight into the creation of the mythology of Middle-earth\, through the alliterative verse tales of two of the most crucial stories in Tolkien's world -- those of Turien and Luthien. The first of the poems is the unpublished Lay of The Children of Hurin\, narrating on a grand scale the tragedy of Turin Turambar. The second is the moving Lay of Leithian\, the chief source of the tale of Beren and Luthien in The Silmarillion\, telling of the Quest of the Silmaril and the encounter with Morgoth in his subterranean fortress. Accompanying the poems are commentaries on the evolution of the history of the Elder Days. Also included is the notable criticism of The Lay of The Leithian by CS Lewis\, who read the poem in 1929. -2675,The Lord of the Rings - Boxed Set,J.R.R. Tolkien,2012,HarperCollins,4.56,This beautiful gift edition of The Hobbit\, J.R.R. Tolkien's classic prelude to his Lord of the Rings trilogy\, features cover art\, illustrations\, and watercolor paintings by the artist Alan Lee. Bilbo Baggins is a hobbit who enjoys a comfortable\, unambitious life\, rarely traveling any farther than his pantry or cellar. But his contentment is disturbed when the wizard Gandalf and a company of dwarves arrive on his doorstep one day to whisk him away on an adventure. They have launched a plot to raid the treasure hoard guarded by Smaug the Magnificent\, a large and very dangerous dragon. Bilbo reluctantly joins their quest\, unaware that on his journey to the Lonely Mountain he will encounter both a magic ring and a frightening creature known as Gollum. Written for J.R.R. Tolkien's own children\, The Hobbit has sold many millions of copies worldwide and established itself as a modern classic. -7140,The Lord of the Rings Poster Collection: Six Paintings by Alan Lee (No. 1),[J. R. R. 
Tolkien, Alan Lee],2002,HarperSport,4.75,A selection of stunning poster paintings from the celebrated Tolkien artist Alan Lee - the man behind many of the striking images from The Lord of The Rings movie. The 50 paintings contained within the centenary edition of The Lord of the Rings in 1992 have themselves become classics and Alan Lee's interpretations are hailed as the most faithful to Tolkien's own vision. This new poster collection\, a perfect complement to volume one\, reproduces six more of the most popular paintings from the book in a format suitable either for hanging as posters or mounting and framing. -5127,The Overcoat, Nikolai Gogol,1992,Courier Corporation,3.75,Four short stories include a satirical tale of Russian bureaucrats and a portrayal of an elderly couple living in the secluded countryside. -8875,The Two Towers,John Ronald Reuel Tolkien,2007,HarperCollins UK,4.64,The second volume in The Lord of the Rings\, This title is also available as a film. -4977,The Unvanquished,William Faulkner,2011,Vintage,3.50,Set in Mississippi during the Civil War and Reconstruction\, THE UNVANQUISHED focuses on the Sartoris family\, who\, with their code of personal responsibility and courage\, stand for the best of the Old South's traditions. -4382,The Wolves of Witchmaker,Carole Guinane,2001,iUniverse,5.00,Polly Lavender is mysteriously lured onto Witchmaker's grounds along with her best friends Tony Rico\, Gracie Reene\, and Zeus\, the wolf they rescued as a pup. The three must quickly learn to master the art of magic because they have been chosen to lead Witchmaker Prep against a threat that has grim consequences. -7912,The Word For World is Forest,Ursula K. Le Guin,2015,Gollancz,4.22,When the inhabitants of a peaceful world are conquered by the bloodthirsty yumens\, their existence is irrevocably altered. Forced into servitude\, the Athsheans find themselves at the mercy of their brutal masters. Desperation causes the Athsheans\, led by Selver\, to retaliate against their captors\, abandoning their strictures against violence. But in defending their lives\, they have endangered the very foundations of their society. For every blow against the invaders is a blow to the humanity of the Athsheans. And once the killing starts\, there is no turning back. -1211,The brothers Karamazov,Fyodor Dostoevsky,2003,Bantam Classics,1.00,In 1880 Dostoevsky completed The Brothers Karamazov\, the literary effort for which he had been preparing all his life. Compelling\, profound\, complex\, it is the story of a patricide and of the four sons who each had a motive for murder: Dmitry\, the sensualist\, Ivan\, the intellectual\, Alyosha\, the mystic\, and twisted\, cunning Smerdyakov\, the bastard child. Frequently lurid\, nightmarish\, always brilliant\, the novel plunges the reader into a sordid love triangle\, a pathological obsession\, and a gripping courtroom drama. But throughout the whole\, Dostoevsky searhes for the truth--about man\, about life\, about the existence of God. A terrifying answer to man's eternal questions\, this monumental work remains the crowning achievement of perhaps the finest novelist of all time. From the Paperback edition. -8086,The grand inquisitor (Milestones of thought),Fyodor Dostoevsky,1981,A&C Black,4.09,Dostoevsky's portrayal of the Catholic Church during the Inquisition is a plea for the power of pure faith\, and a critique of the tyrannies of institutionalized religion. 
This is an except from the Brothers Karamazov which stands alone as a statement of philiosophy and a warning about the surrender of freedom for the sake of comfort. -8077,The unvanquished,William Faulkner,2011,Vintage,4.00,Set in Mississippi during the Civil War and Reconstruction\, THE UNVANQUISHED focuses on the Sartoris family\, who\, with their code of personal responsibility and courage\, stand for the best of the Old South's traditions. -8480,The wind's twelve quarters: Short stories,Ursula K. Le Guin,2017,HarperCollins,5.00,The recipient of numerous literary prizes\, including the National Book Award\, the Kafka Award\, and the Pushcart Prize\, Ursula K. Le Guin is renowned for her lyrical writing\, rich characters\, and diverse worlds. The Wind's Twelve Quarters collects seventeen powerful stories\, each with an introduction by the author\, ranging from fantasy to intriguing scientific concepts\, from medieval settings to the future. Including an insightful foreword by Le Guin\, describing her experience\, her inspirations\, and her approach to writing\, this stunning collection explores human values\, relationships\, and survival\, and showcases the myriad talents of one of the most provocative writers of our time. -2847,To Love A Dark Stranger (Lovegram Historical Romance),Colleen Faulkner,1997,Zebra Books,5.00,Bestselling author Colleen Faulkner's tumultuous saga of royal intrigue and forbidden desire sweeps from the magnificent estates of the aristocracy to the shadowy streets of London to King Charles II's glittering Restoration court. -3293,Universe by Design,Danny Faulkner,2004,New Leaf Publishing Group,4.25,Views the stars and planets from a creationist standpoint\, addresses common misconceptions and difficulties about relativity and cosmology\, and discusses problems with the big bang theory with many analogies\, examples\, diagrams\, and illustrations. Original. -5327,War and Peace,Leo Tolstoy,2016,Lulu.com,3.84,Covering the period from the French invasion under Napoleon into Russia. Although not covering solely the war itself\, the serialized novel does cover the effects the war had on Russian society from the common person right up to the Tsar himself. The book starts to move more to a philosophical consideration on war and peace near the end making the book as a whole an important piece of literature. -4536,War and Peace (Signet Classics),[Leo Tolstoy, Pat Conroy, John Hockenberry],2012,Signet Classics,4.75,Presents the classical epic of the Napoleonic Wars and their effects on four Russian families. -9032,War and Peace: A Novel (6 Volumes),Tolstoy Leo,2013,Hardpress Publishing,3.81,Unlike some other reproductions of classic texts (1) We have not used OCR(Optical Character Recognition)\, as this leads to bad quality books with introduced typos. (2) In books where there are images such as portraits\, maps\, sketches etc We have endeavoured to keep the quality of these images\, so they represent accurately the original artefact. Although occasionally there may be certain imperfections with these old texts\, we feel they deserve to be made available for future generations to enjoy. -5119,William Faulkner,William Faulkner,2011,Vintage,4.00,This invaluable volume\, which has been republished to commemorate the one-hundredth anniversary of Faulkner's birth\, contains some of the greatest short fiction by a writer who defined the course of American literature. 
Its forty-five stories fall into three categories: those not included in Faulkner's earlier collections\, previously unpublished short fiction\, and stories that were later expanded into such novels as The Unvanquished\, The Hamlet\, and Go Down\, Moses. With its Introduction and extensive notes by the biographer Joseph Blotner\, Uncollected Stories of William Faulkner is an essential addition to its author's canon--as well as a book of some of the most haunting\, harrowing\, and atmospheric short fiction written in the twentieth century. -8615,Winter notes on summer impressions,Fyodor Dostoevsky,2018,Alma Books,4.75,In June 1862\, Dostoevsky left Petersburg on his first excursion to Western Europe. Ostensibly making the trip to consult Western specialists about his epilepsy\, he also wished to see first-hand the source of the Western ideas he believed were corrupting Russia. Over the course of his journey he visited a number of major cities\, including Berlin\, Paris\, London\, Florence\, Milan and Vienna.His record of the trip\, Winter Notes on Summer Impressions - first published in the February 1863 issue of Vremya\, the periodical he edited - is the chrysalis out of which many elements of his later masterpieces developed. -6478,Woman-The Full Story: A Dynamic Celebration of Freedoms,Michele Guinness,2003,Zondervan,5.00,What does it mean to be a woman today? What have women inherited from their radical\, risk-taking sisters of the past? And how does God view this half of humanity? Michele Guinness invites us on an adventure of discovery\, exploring the biblical texts\, the annals of history and the experiences of women today in search of the challenges and achievements\, failures and joys\, of women throughout the ages. -8678,Worlds of Exile and Illusion: Three Complete Novels of the Hainish Series in One Volume--Rocannon's World\, Planet of Exile\, City of Illusions,Ursula K. Le Guin,2016,Orb Books,4.41,Worlds of Exile and Illusion contains three novels in the Hainish Series from Ursula K. Le Guin\, one of the greatest science fiction writers and many times the winner of the Hugo and Nebula Awards. Her career as a novelist was launched by the three novels contained here. These books\, Rocannon's World\, Planet of Exile\, and City of Illusions\, are set in the same universe as Le Guin's groundbreaking classic\, The Left Hand of Darkness. At the Publisher's request\, this title is being sold without Digital Rights Management Software (DRM) applied. +_id:keyword,book_no:keyword,title:text,author:text,year:integer,publisher:text,ratings:float,description:text +0,2924,A Gentle Creature and Other Stories: White Nights\, A Gentle Creature\, and The Dream of a Ridiculous Man (The World's Classics),[Fyodor Dostoevsky, Alan Myers, W. J. Leatherbarrow],2009,Oxford Paperbacks,4.00,In these stories Dostoevsky explores both the figure of the dreamer divorced from reality and also his own ambiguous attitude to utopianism\, themes central to many of his great novels. This new translation captures the power and lyricism of Dostoevsky's writing\, while the introduction examines the stories in relation to one another and to his novels. +1,7670,A Middle English Reader and Vocabulary,[Kenneth Sisam, J. R. R. Tolkien],2011,Courier Corporation,4.33,This highly respected anthology of medieval English literature features poetry\, prose and popular tales from Arthurian legend and classical mythology. Includes notes on each extract\, appendices\, and an extensive glossary by J. R. R. Tolkien. 
+2,7381,A Psychic in the Heartland: The Extraordinary Experiences of a Small Town Doctor,Bettilu Stein Faulkner,2003,Red Wheel/Weiser,4.50,The true story of a small-town doctor destined to live his life along two paths: one as a successful physician\, the other as a psychic with ever more interesting adventures. Experiencing a wide range of spiritual phenomena\, Dr. Riblet Hout learned about the connection between the healer and the healed\, our individual missions on earth\, free will\, and our relationship with God. He also paints a vivid picture of life on the other side as well as the moment of transition from physical life to afterlife. +3,2883,A Summer of Faulkner: As I Lay Dying/The Sound and the Fury/Light in August (Oprah's Book Club),William Faulkner,2005,Vintage Books,3.89,Presents three novels\, including As I Lay Dying\, in which the Bundren family journeys across Mississippi to bury their mother\, The Sound and the Fury\, in which Caddy Compson's story is narrated by her three brothers\, and Light in August\, in which th +4,4023,A Tolkien Compass: Including J. R. R. Tolkien's Guide to the Names in The Lord of the Rings,[Walter Scheps, Agnes Perkins, Charles Adolph Huttar, John Ronald Reuel Tolkien],1975,Open Court Publishing,4.67,The structure\, content\, and character of Tolkien's The Hobbit and The Lord of the Rings are dealt with in ten critical essays. +5,2382,A Wizard of Earthsea (Earthsea Trilogy Ser.),Ursula K. Le Guin,1991,Atheneum Books for Young Readers,4.01,A boy grows to manhood while attempting to subdue the evil he unleashed on the world as an apprentice to the Master Wizard. +6,7541,A Writer's Diary (Volume 1: 1873-1876),Fyodor Dostoevsky,1997,Northwestern University Press,4.50,Winner of the AATSEEL Outstanding Translation Award This is the first paperback edition of the complete collection of writings that has been called Dostoevsky's boldest experiment with literary form\, it is a uniquely encyclopedic forum of fictional and nonfictional genres. The Diary's radical format was matched by the extreme range of its contents. In a single frame it incorporated an astonishing variety of material: short stories\, humorous sketches\, reports on sensational crimes\, historical predictions\, portraits of famous people\, autobiographical pieces\, and plans for stories\, some of which were never written while others appeared in the Diary itself. +7,7400,Anna Karenina: Television Tie-In Edition (Signet classics),[Leo Tolstoy, SBP Editors],2019,Samaira Book Publishers,4.45,The Russian novelist and moral philosopher Leo Tolstoy (1828-1910) ranks as one of the world s great writers\, and his 'War and Peace' has been called the greatest novel ever written. But during his long lifetime\, Tolstoy also wrote enough shorter works to fill many volumes. The message in all his stories is presented with such humour that the reader hardly realises that it is strongly didactic. These stories give a snapshot of Russia and its people in the late nineteenth century. +8,4917,Autumn of the Patriarch,Gabriel Garcia Marquez,2014,Penguin UK,4.33,Gabriel Garcia Marquez\, winner of the 1982 Nobel Prize for Literature and author of One Hundred Years of Solitude\, explores the loneliness of power in Autumn of the Patriarch. 
'Over the weekend the vultures got into the presidential palace by pecking through the screens on the balcony windows and the flapping of their wings stirred up the stagnant time inside' As the citizens of an unnamed Caribbean nation creep through dusty corridors in search of their tyrannical leader\, they cannot comprehend that the frail and withered man lying dead on the floor can be the self-styled General of the Universe. Their arrogant\, manically violent leader\, known for serving up traitors to dinner guests and drowning young children at sea\, can surely not die the humiliating death of a mere mortal? Tracing the demands of a man whose egocentric excesses mask the loneliness of isolation and whose lies have become so ingrained that they are indistinguishable from truth\, Marquez has created a fantastical portrait of despotism that rings with an air of reality. 'Delights with its quirky humanity and black humour and impresses by its total originality' Vogue 'Captures perfectly the moral squalor and political paralysis that enshrouds a society awaiting the death of a long-term dictator' Guardian 'Marquez writes in this lyrical\, magical language that no-one else can do' Salman Rushdie +9,9896,Barn burning (A tale blazer book),William Faulkner,1979,Perfection Learning,3.50,Reprinted from Collected Stories of William Faulkner\, by permission of Random House\, Inc. +10,9607,Beowolf: The monsters and the critics,John Ronald Reuel Tolkien,1997,HarperCollins UK,4.12,A collection of seven essays by J.R.R. Tolkien arising out of Tolkien's work in medieval literature +11,1985,Brothers Karamazov,Fyodor Dostoevsky,2015,First Avenue Editions,5.00,Four brothers reunite in their hometown in Russia. The murder of their father forces the brothers to question their beliefs about each other\, religion\, and morality. +12,2713,Collected Stories of William Faulkner,William Faulkner,1995,Vintage,4.53,A collection of short stories focuses on the people of rural Mississippi +13,2464,Conversations with Kurt Vonnegut (Literary Conversations),Kurt Vonnegut,1988,Univ. Press of Mississippi,4.40,Gathers interviews with Vonnegut from each period of his career and offers a brief profile of his life and accomplishments +14,8534,Crime and Punishment (Oxford World's Classics),Fyodor Dostoevsky,2017,Oxford University Press,4.38,'One death\, in exchange for thousands of lives - it's simple arithmetic!' A new translation of Dostoevsky's epic masterpiece\, Crime and Punishment (1866). The impoverished student Raskolnikov decides to free himself from debt by killing an old moneylender\, an act he sees as elevating himself above conventional morality. Like Napoleon he will assert his will and his crime will be justified by its elimination of 'vermin' for the sake of the greater good. But Raskolnikov is torn apart by fear\, guilt\, and a growing conscience under the influence of his love for Sonya. Meanwhile the police detective Porfiry is on his trail. It is a powerfully psychological novel\, in which the St Petersburg setting\, Dostoevsky's own circumstances\, and contemporary social problems all play their part. 
+15,8605,Dead Souls,Nikolai Gogol,1997,Vintage,4.28,Chichikov\, an amusing and often confused schemer\, buys deceased serfs' names from landholders' poll tax lists hoping to mortgage them for profit +16,6970,Domestic Goddesses,Edith Vonnegut,1998,Pomegranate,4.67,In this immensely charming and insightful book\, artist Edith Vonnegut takes issue with traditional art imagery in which women are shown as weak and helpless. Through twenty-seven of her own paintings interspersed with her text\, she poignantly -- and humorously -- illustrates her maxim that the lives of mothers and homemakers are filled with endless challenges and vital decisions that should be portrayed with the dignity they deserve. In Vonnegut's paintings\, one woman bravely blocks the sun from harming a child (Sun Block) while another vacuums the stairs with angelic figures singing her praises (Electrolux). In contrasting her own Domestic Goddesses with the diaphanous women of classical art (seven paintings by masters such as Titian and Botticelli are included)\, she 'expresses the importance of traditional roles of women so cleverly and with such joy that her message and images will be forever emblazoned on our collective psyche. +17,4814,El Coronel No Tiene Quien Le Escriba / No One Writes to the Colonel (Spanish Edition),Gabriel Garcia Marquez,2005,Harper Collins,4.45,Written with compassionate realism and wit\, the stories in this mesmerizing collection depict the disparities of town and village life in South America\, of the frightfully poor and outrageously rich\, of memories and illusions\, and of lost opportunities and present joys. +18,4636,FINAL WITNESS,Simon Tolkien,2004,Random House Digital\, Inc.,3.94,The murder of Lady Anne Robinson by two intruders causes a schism in the victim's family when her son convinces police that his father's beautiful personal assistant hired the killers\, while his father\, the British minister of defense\, refuses to believe his son and marries the accused. A first novel. Reprint. +19,2936,Fellowship of the Ring 2ND Edition,John Ronald Reuel Tolkien,2008,HarperCollins UK,4.43,Sauron\, the Dark Lord\, has gathered to him all the Rings of Power - the means by which he intends to rule Middle-earth. All he lacks in his plans for dominion is the One Ring - the ring that rules them all - which has fallen into the hands of the hobbit\, Bilbo Baggins. In a sleepy village in the Shire\, young Frodo Baggins finds himself faced with an immense task\, as his elderly cousin Bilbo entrusts the Ring to his care. Frodo must leave his home and make a perilous journey across Middle-earth to the Cracks of Doom\, there to destroy the Ring and foil the Dark Lord in his evil purpose. JRR Tolkien's great work of imaginative fiction has been labelled both a heroic romance and a classic fantasy fiction. By turns comic and homely\, epic and diabolic\, the narrative moves through countless changes of scene and character in an imaginary world which is totally convincing in its detail. +20,8956,GOD BLESS YOU MR. ROSEWATER : Or Pearls Before Swine,Kurt Vonnegut,1970,New York : Dell,4.00,A lawyer schemes to gain control of a large fortune by having the present claimant declared insane. 
+21,6818,Hadji Murad,Leo Tolstoy,2022,Hachette UK,3.88,'How truth thickens and deepens when it migrates from didactic fable to the raw experience of a visceral awakening is one of the thrills of Tolstoy's stories' Sharon Cameron in her preface to Hadji Murad and Other Stories This\, the third volume of Tolstoy's shorter fiction concentrates on his later stories\, including one of his greatest\, 'Hadji Murad'. In the stark form of homily that shapes these later works\, life considered as one's own has no rational meaning. From the chain of events that follows in the wake of two schoolboys' deception in 'The Forged Coupon' to the disillusionment of the narrator in 'After the Ball' we see\, in Virginia Woolf's observation\, that Tolstoy puts at the centre of his writing one 'who gathers into himself all experience\, turns the world round between his fingers\, and never ceases to ask\, even as he enjoys it\, what is the meaning of it'. The riverrun edition reissues the translation of Louise and Aylmer Maude\, whose influential versions of Tolstoy first brought his work to a wide readership in English. +22,3950,Hocus,Kurt Vonnegut,1997,Penguin,4.67,Tarkington College\, a small\, exclusive college in upstate New York\, is turned upside down when ten thousand prisoners from the maximum security prison across Lake Mohiga break out and head for the college +23,5404,Intruder in the dust,William Faulkner,2011,Vintage,3.18,A classic Faulkner novel which explores the lives of a family of characters in the South. An aging black who has long refused to adopt the black's traditionally servile attitude is wrongfully accused of murdering a white man. +24,5578,Intruder in the dust: A novel,William Faulkner,1991,Vintage,3.18,Dramatizes the events that surround the murder of a white man in a volatile Southern community +25,6380,La hojarasca (Spanish Edition),Gabriel Garcia Marquez,1979,Harper Collins,3.75,Translated from the Spanish by Gregory Rabassa +26,5335,Letters of J R R Tolkien,J.R.R. Tolkien,2014,HarperCollins,4.70,This collection will entertain all who appreciate the art of masterful letter writing. The Letters of J.R.R Tolkien sheds much light on Tolkien's creative genius and grand design for the creation of a whole new world: Middle-earth. Featuring a radically expanded index\, this volume provides a valuable research tool for all fans wishing to trace the evolution of THE HOBBIT and THE LORD OF THE RINGS. +27,3870,My First 100 Words in Spanish/English (My First 100 Words Pull-Tab Book),Keith Faulkner,1998,Libros Para Ninos,4.50,Learning a foreign language has never been this much fun! Just pull the sturdy tabs and change the words under the pictures from English to Spanish and back again to English! +28,4502,O'Brian's Bride,Colleen Faulkner,1995,Zebra Books,5.00,Abandoning her pampered English life to marry a man in the American colonies\, Elizabeth finds her new world shattered when her husband is killed in an accident\, leaving her in charge of a business on the untamed frontier. Original. +29,7635,Oliphaunt (Beastly Verse),J. R. R. Tolkien,1989,Contemporary Books,2.50,A poem in which an elephant describes himself and his way of life. On board pages. +30,3254,Pearl and Sir Orfeo,[John Ronald Reuel Tolkien, Christopher Tolkien],1995,Harpercollins Pub Limited,5.00,Three epic poems from 14th century England speak of life during the age of chivalry. Translated from medieval English. +31,3677,Planet of Exile,Ursula K. 
Le Guin,1979,Orion,4.20,PLAYAWAY: An alliance between the powerful Tevars and the brown-skinned\, clairvoyant Farbons must take place if the two colonies are to withstand the fierce attack of the nomadic tribes from the north of the planet Eltanin. +32,4289,Poems from the Hobbit,J R R Tolkien,1999,HarperCollins Publishers,4.00,A collection of J.R.R. Tolkien's Hobbit poems in a miniature hardback volume complete with illustrations by Tolkien himself. Far over misty mountains cold To dungeons deep and caverns old We must away ere break of day To seek the pale enchanted gold. J.R.R. Tolkien's acclaimed The Hobbit contains 12 poems which are themselves masterpieces of writing. This miniature book\, illustrated with 30 of Tolkien's own paintings and drawings from the book -- some quite rare and all in full colour -- includes all the poems\, plus Gollum's eight riddles in verse\, and will be a perfect keepsake for lovers of The Hobbit and of accomplished poetry. +33,6151,Pop! Went Another Balloon: A Magical Counting Storybook (Magical Counting Storybooks),[Keith Faulkner, Rory Tyger],2003,Dutton Childrens Books,5.00,Toby the turtle goes from in-line skates to a motorcycle to a rocketship with a handful of balloons that pop\, one by one\, along the way. +34,3535,Rainbow's End: A Magical Story and Moneybox,[Keith Faulkner, Beverlie Manson],2003,Barrons Juveniles,4.00,In this combination picture storybook and coin bank\, the unusual front cover shows an illustration from the story that's embellished with five transparent plastic windows. Opening the book\, children will find a story about a poor little ballerina who is crying because her dancing shoes are worn and she has no money to replace them. Full color. Consumable. +35,8423,Raising Faithful Kids in a Fast-Paced World,Paul Faulkner,1995,Howard Publishing Company,5.00,To find help for struggling parents\, Dr. Paul Faulkner--renowned family counselor and popular speaker--interviewed 30 successful families who have managed to raise faithful kids while also maintaining demanding careers. The invaluable strategies and methods he gleaned are now available in this powerful book delivered in Dr. Faulkner's warm\, humorous style. +36,1463,Realms of Tolkien: Images of Middle-earth,J. R. R. Tolkien,1997,HarperCollins Publishers,4.00,Twenty new and familiar Tolkien artists are represented in this fabulous volume\, breathing an extraordinary variety of life into 58 different scenes\, each of which is accompanied by appropriate passage from The Hobbit and The Lord of the Rings and The Silmarillion +37,6323,Resurrection (The Penguin classics),Leo Tolstoy,2009,Penguin,3.25,Leo Tolstoy's last completed novel\, Resurrection is an intimate\, psychological tale of guilt\, anger and forgiveness Serving on the jury at a murder trial\, Prince Dmitri Nekhlyudov is devastated when he sees the prisoner - Katyusha\, a young maid he seduced and abandoned years before. As Dmitri faces the consequences of his actions\, he decides to give up his life of wealth and luxury to devote himself to rescuing Katyusha\, even if it means following her into exile in Siberia. But can a man truly find redemption by saving another person? Tolstoy's most controversial novel\, Resurrection (1899) is a scathing indictment of injustice\, corruption and hypocrisy at all levels of society. Creating a vast panorama of Russian life\, from peasants to aristocrats\, bureaucrats to convicts\, it reveals Tolstoy's magnificent storytelling powers. 
Anthony Briggs' superb new translation preserves Tolstoy's gripping realism and satirical humour. In his introduction\, Briggs discusses the true story behind Resurrection\, Tolstoy's political and religious reasons for writing the novel\, his gift for characterization and the compelling psychological portrait of Dmitri. This edition also includes a chronology\, notes and a summary of chapters. For more than seventy years\, Penguin has been the leading publisher of classic literature in the English-speaking world. With more than 1\,700 titles\, Penguin Classics represents a global bookshelf of the best works throughout history and across genres and disciplines. Readers trust the series to provide authoritative texts enhanced by introductions and notes by distinguished scholars and contemporary authors\, as well as up-to-date translations by award-winning translators. +38,2714,Return of the King Being the Third Part of The Lord of the Rings,J. R. R. Tolkien,2012,HarperCollins,4.60,Concluding the story begun in The Hobbit\, this is the final part of Tolkien s epic masterpiece\, The Lord of the Rings\, featuring an exclusive cover image from the film\, the definitive text\, and a detailed map of Middle-earth. The armies of the Dark Lord Sauron are massing as his evil shadow spreads ever wider. Men\, Dwarves\, Elves and Ents unite forces to do battle against the Dark. Meanwhile\, Frodo and Sam struggle further into Mordor in their heroic quest to destroy the One Ring. The devastating conclusion of J.R.R. Tolkien s classic tale of magic and adventure\, begun in The Fellowship of the Ring and The Two Towers\, features the definitive edition of the text and includes the Appendices and a revised Index in full. To celebrate the release of the first of Peter Jackson s two-part film adaptation of The Hobbit\, THE HOBBIT: AN UNEXPECTED JOURNEY\, this third part of The Lord of the Rings is available for a limited time with an exclusive cover image from Peter Jackson s award-winning trilogy. +39,7350,Return of the Shadow,[John Ronald Reuel Tolkien, Christopher Tolkien],2000,Mariner Books,5.00,In this sixth volume of The History of Middle-earth the story reaches The Lord of the Rings. In The Return of the Shadow (an abandoned title for the first volume) Christopher Tolkien describes\, with full citation of the earliest notes\, outline plans\, and narrative drafts\, the intricate evolution of The Fellowship of the Ring and the gradual emergence of the conceptions that transformed what J.R.R. Tolkien for long believed would be a far shorter book\, 'a sequel to The Hobbit'. The enlargement of Bilbo's 'magic ring' into the supremely potent and dangerous Ruling Ring of the Dark Lord is traced and the precise moment is seen when\, in an astonishing and unforeseen leap in the earliest narrative\, a Black Rider first rode into the Shire\, his significance still unknown. The character of the hobbit called Trotter (afterwards Strider or Aragorn) is developed while his identity remains an absolute puzzle\, and the suspicion only very slowly becomes certainty that he must after all be a Man. The hobbits\, Frodo's companions\, undergo intricate permutations of name and personality\, and other major figures appear in strange modes: a sinister Treebeard\, in league with the Enemy\, a ferocious and malevolent Farmer Maggot. The story in this book ends at the point where J.R.R. 
Tolkien halted in the story for a long time\, as the Company of the Ring\, still lacking Legolas and Gimli\, stood before the tomb of Balin in the Mines of Moria. The Return of the Shadow is illustrated with reproductions of the first maps and notable pages from the earliest manuscripts. +40,6760,Roverandom,J. R. R. Tolkien,1999,Mariner Books,4.38,Rover\, a dog who has been turned into a toy dog encounters rival wizards and experiences various adventures on the moon with giant spiders\, dragon moths\, and the Great White Dragon. By the author of The Hobbit. Reprint. +41,8873,Searoad: Chronicles of Klatsand,Ursula K. Le Guin,2004,Shambhala Publications,5.00,A series of interlinking tales and a novella by the author of the Earthsea trilogy portrays the triumphs and struggles of several generations of women who independently control Klatsand\, a small resort town on the Oregon coast. Reprint. +42,2378,Selected Letters of Lucretia Coffin Mott (Women in American History),[Lucretia Mott, Holly Byers Ochoa, Carol Faulkner],2002,University of Illinois Press,5.00,Dedicated to reform of almost every kind - temperance\, peace\, equal rights\, woman suffrage\, nonresistance\, and the abolition of slavery - Mott viewed women's rights as only one element of a broad-based reform agenda for American society. +43,1502,Selected Passages from Correspondence with Friends,Nikolai Vasilevich Gogol,2009,Vanderbilt University Press,4.00,Nikolai Gogol wrote some letters to his friends\, none of which were a nose of high rank. Many are reproduced here (the letters\, not noses). +44,5996,Smith of Wooten Manor & Farmer Giles of Ham,John Ronald Reuel Tolkien,1969,Del Rey,4.91,Two bewitching fantasies by J.R.R. Tolkien\, beloved author of THE HOBBIT. In SMITH OF WOOTTON MAJOR\, Tolkien explores the gift of fantasy\, and what it means to the life and character of the man who receives it. And FARMER GILES OF HAM tells a delightfully ribald mock-heroic tale\, where a dragon who invades a town refuses to fight\, and a farmer is chosen to slay him. +45,2301,Smith of Wootton Major & Farmer Giles of Ham,John Ronald Reuel Tolkien,1969,Del Rey,5.00,Two bewitching fantasies by J.R.R. Tolkien\, beloved author of THE HOBBIT. In SMITH OF WOOTTON MAJOR\, Tolkien explores the gift of fantasy\, and what it means to the life and character of the man who receives it. And FARMER GILES OF HAM tells a delightfully ribald mock-heroic tale\, where a dragon who invades a town refuses to fight\, and a farmer is chosen to slay him. +46,2236,Steering the Craft,Ursula K. Le Guin,2015,Houghton Mifflin Harcourt,4.73,A revised and updated guide to the essentials of a writer's craft\, presented by a brilliant practitioner of the art Completely revised and rewritten to address the challenges and opportunities of the modern era\, this handbook is a short\, deceptively simple guide to the craft of writing. Le Guin lays out ten chapters that address the most fundamental components of narrative\, from the sound of language to sentence construction to point of view. Each chapter combines illustrative examples from the global canon with Le Guin's own witty commentary and an exercise that the writer can do solo or in a group. She also offers a comprehensive guide to working in writing groups\, both actual and online. Masterly and concise\, Steering the Craft deserves a place on every writer's shelf. 
+47,4724,THE UNVANQUISHED,William Faulkner,2011,Vintage,3.50,Set in Mississippi during the Civil War and Reconstruction\, THE UNVANQUISHED focuses on the Sartoris family\, who\, with their code of personal responsibility and courage\, stand for the best of the Old South's traditions. +48,5948,That We Are Gentle Creatures,Fyodor Dostoevsky,2009,OUP Oxford,4.33,In the stories in this volume Dostoevsky explores both the figure of the dreamer divorced from reality and his own ambiguous attitude to utopianism\, themes central to many of his great novels. In White Nights the apparent idyll of the dreamer's romantic fantasies disguises profound loneliness and estrangement from 'living life'. Despite his sentimental friendship with Nastenka\, his final withdrawal into the world of the imagination anticipates the retreat into the 'underground' of many of Dostoevsky's later intellectual heroes. A Gentle Creature and The Dream of a Ridiculous Man show how such withdrawal from reality can end in spiritual desolation and moral indifference and how\, in Dostoevsky's view\, the tragedy of the alienated individual can be resolved only by the rediscovery of a sense of compassion and responsibility towards fellow human beings. This new translation captures the power and lyricism of Dostoevsky's writing\, while the introduction examines the stories in relation to one another and to his novels. ABOUT THE SERIES: For over 100 years Oxford World's Classics has made available the widest range of literature from around the globe. Each affordable volume reflects Oxford's commitment to scholarship\, providing the most accurate text plus a wealth of other valuable features\, including expert introductions by leading authorities\, helpful notes to clarify the text\, up-to-date bibliographies for further study\, and much more. +49,1937,The Best Short Stories of Dostoevsky (Modern Library),Fyodor Dostoevsky,2012,Modern Library,4.33,This collection\, unique to the Modern Library\, gathers seven of Dostoevsky's key works and shows him to be as adept at the short story as at the novel. Exploring many of the same themes as in his longer works\, these small masterpieces move from the tender and romantic White Nights\, an archetypal nineteenth-century morality tale of pathos and loss\, to the famous Notes from the Underground\, a story of guilt\, ineffectiveness\, and uncompromising cynicism\, and the first major work of existential literature. Among Dostoevsky's prototypical characters is Yemelyan in The Honest Thief\, whose tragedy turns on an inability to resist crime. Presented in chronological order\, in David Magarshack's celebrated translation\, this is the definitive edition of Dostoevsky's best stories. +50,2776,The Devil and Other Stories (Oxford World's Classics),Leo Tolstoy,2003,OUP Oxford,5.00,'It is impossible to explain why Yevgeny chose Liza Annenskaya\, as it is always impossible to explain why a man chooses this and not that woman.' This collection of eleven stories spans virtually the whole of Tolstoy's creative life. While each is unique in form\, as a group they are representative of his style\, and touch on the central themes that surface in War and Peace and Anna Karenina. Stories as different as 'The Snowstorm'\, 'Lucerne'\, 'The Diary of a Madman'\, and 'The Devil' are grounded in autobiographical experience. They deal with journeys of self-discovery and the moral and religious questioning that characterizes Tolstoy's works of criticism and philosophy.
'Strider' and 'Father Sergy'\, as well as reflecting Tolstoy's own experiences\, also reveal profound psychological insights. These stories range over much of the Russian world of the nineteenth century\, from the nobility to the peasantry\, the military to the clergy\, from merchants and cobblers to a horse and a tree. Together they present a fascinating picture of Tolstoy's skill and artistry. ABOUT THE SERIES: For over 100 years Oxford World's Classics has made available the widest range of literature from around the globe. Each affordable volume reflects Oxford's commitment to scholarship\, providing the most accurate text plus a wealth of other valuable features\, including expert introductions by leading authorities\, helpful notes to clarify the text\, up-to-date bibliographies for further study\, and much more. +51,4231,The Dispossessed,Ursula K. Le Guin,1974,Harpercollins,4.26,Frequently reissued with the same ISBN\, but with slightly differing bibliographical details. +52,7480,The Hobbit,J. R. R. Tolkien,2012,Mariner Books,4.64,Celebrating 75 years of one of the world's most treasured classics with an all new trade paperback edition. Repackaged with new cover art. 500\,000 first printing. +53,6405,The Hobbit or There and Back Again,J. R. R. Tolkien,2012,Mariner Books,4.63,Celebrating 75 years of one of the world's most treasured classics with an all new trade paperback edition. Repackaged with new cover art. 500\,000 first printing. +54,2540,The Inspector General (Language - Russian) (Russian Edition),[Nicolai Gogol, Thomas Seltzer],2014,CreateSpace,3.50,The Inspector-General is a national institution. To place a purely literary valuation upon it and call it the greatest of Russian comedies would not convey the significance of its position either in Russian literature or in Russian life itself. There is no other single work in the modern literature of any language that carries with it the wealth of associations which the Inspector-General does to the educated Russian. +55,2951,The Insulted and Injured,Fyodor Dostoevsky,2011,Wm. B. Eerdmans Publishing,4.00,The Insulted and Injured\, which came out in 1861\, was Fyodor Dostoevsky's first major work of fiction after his Siberian exile and the first of the long novels that made him famous. Set in nineteenth-century Petersburg\, this gripping novel features a vividly drawn set of characters - including Vanya (Dostoevsky's semi-autobiographical hero)\, Natasha (the woman he loves)\, and Alyosha (Natasha's aristocratic lover) - all suffering from the cruelly selfish machinations of Alyosha's father\, the dark and powerful Prince Valkovsky. Boris Jakim's fresh English-language rendering of this gem in the Dostoevsky canon is both more colorful and more accurate than any earlier translation. --from back cover. +56,2130,The J. R. R. Tolkien Audio Collection,[John Ronald Reuel Tolkien, Christopher Tolkien],2002,HarperCollins Publishers,4.89,For generations\, J R R Tolkien's words have brought to thrilling life a world of hobbits\, magic\, and historic myth\, woken from its foggy slumber within our minds. Here\, he tells the tales in his own voice. +57,9801,The Karamazov Brothers (Oxford World's Classics),Fyodor Dostoevsky,2008,Oxford University Press,4.40,A remarkable work showing the author's power to depict Russian character and his understanding of human nature. Driven by intense\, uncontrollable emotions of rage and revenge\, the four Karamazov brothers all become involved in the brutal murder of their despicable father.
+58,5469,The Lays of Beleriand,[John Ronald Reuel Tolkien, Christopher Tolkien],2002,Harpercollins Pub Limited,4.42,The third volume that contains the early myths and legends which led to the writing of Tolkien's epic tale of war\, The Silmarillion. This\, the third volume of The History of Middle-earth\, gives us a privileged insight into the creation of the mythology of Middle-earth\, through the alliterative verse tales of two of the most crucial stories in Tolkien's world -- those of Turin and Luthien. The first of the poems is the unpublished Lay of The Children of Hurin\, narrating on a grand scale the tragedy of Turin Turambar. The second is the moving Lay of Leithian\, the chief source of the tale of Beren and Luthien in The Silmarillion\, telling of the Quest of the Silmaril and the encounter with Morgoth in his subterranean fortress. Accompanying the poems are commentaries on the evolution of the history of the Elder Days. Also included is the notable criticism of The Lay of Leithian by CS Lewis\, who read the poem in 1929. +59,2675,The Lord of the Rings - Boxed Set,J.R.R. Tolkien,2012,HarperCollins,4.56,This beautiful gift edition of The Hobbit\, J.R.R. Tolkien's classic prelude to his Lord of the Rings trilogy\, features cover art\, illustrations\, and watercolor paintings by the artist Alan Lee. Bilbo Baggins is a hobbit who enjoys a comfortable\, unambitious life\, rarely traveling any farther than his pantry or cellar. But his contentment is disturbed when the wizard Gandalf and a company of dwarves arrive on his doorstep one day to whisk him away on an adventure. They have launched a plot to raid the treasure hoard guarded by Smaug the Magnificent\, a large and very dangerous dragon. Bilbo reluctantly joins their quest\, unaware that on his journey to the Lonely Mountain he will encounter both a magic ring and a frightening creature known as Gollum. Written for J.R.R. Tolkien's own children\, The Hobbit has sold many millions of copies worldwide and established itself as a modern classic. +60,7140,The Lord of the Rings Poster Collection: Six Paintings by Alan Lee (No. 1),[J. R. R. Tolkien, Alan Lee],2002,HarperSport,4.75,A selection of stunning poster paintings from the celebrated Tolkien artist Alan Lee - the man behind many of the striking images from The Lord of The Rings movie. The 50 paintings contained within the centenary edition of The Lord of the Rings in 1992 have themselves become classics and Alan Lee's interpretations are hailed as the most faithful to Tolkien's own vision. This new poster collection\, a perfect complement to volume one\, reproduces six more of the most popular paintings from the book in a format suitable either for hanging as posters or mounting and framing. +61,5127,The Overcoat,Nikolai Gogol,1992,Courier Corporation,3.75,Four short stories include a satirical tale of Russian bureaucrats and a portrayal of an elderly couple living in the secluded countryside. +62,8875,The Two Towers,John Ronald Reuel Tolkien,2007,HarperCollins UK,4.64,The second volume in The Lord of the Rings. This title is also available as a film. +63,4977,The Unvanquished,William Faulkner,2011,Vintage,3.50,Set in Mississippi during the Civil War and Reconstruction\, THE UNVANQUISHED focuses on the Sartoris family\, who\, with their code of personal responsibility and courage\, stand for the best of the Old South's traditions.
+64,4382,The Wolves of Witchmaker,Carole Guinane,2001,iUniverse,5.00,Polly Lavender is mysteriously lured onto Witchmaker's grounds along with her best friends Tony Rico\, Gracie Reene\, and Zeus\, the wolf they rescued as a pup. The three must quickly learn to master the art of magic because they have been chosen to lead Witchmaker Prep against a threat that has grim consequences. +65,7912,The Word For World is Forest,Ursula K. Le Guin,2015,Gollancz,4.22,When the inhabitants of a peaceful world are conquered by the bloodthirsty yumens\, their existence is irrevocably altered. Forced into servitude\, the Athsheans find themselves at the mercy of their brutal masters. Desperation causes the Athsheans\, led by Selver\, to retaliate against their captors\, abandoning their strictures against violence. But in defending their lives\, they have endangered the very foundations of their society. For every blow against the invaders is a blow to the humanity of the Athsheans. And once the killing starts\, there is no turning back. +66,1211,The brothers Karamazov,Fyodor Dostoevsky,2003,Bantam Classics,1.00,In 1880 Dostoevsky completed The Brothers Karamazov\, the literary effort for which he had been preparing all his life. Compelling\, profound\, complex\, it is the story of a patricide and of the four sons who each had a motive for murder: Dmitry\, the sensualist\, Ivan\, the intellectual\, Alyosha\, the mystic\, and twisted\, cunning Smerdyakov\, the bastard child. Frequently lurid\, nightmarish\, always brilliant\, the novel plunges the reader into a sordid love triangle\, a pathological obsession\, and a gripping courtroom drama. But throughout the whole\, Dostoevsky searches for the truth--about man\, about life\, about the existence of God. A terrifying answer to man's eternal questions\, this monumental work remains the crowning achievement of perhaps the finest novelist of all time. From the Paperback edition. +67,8086,The grand inquisitor (Milestones of thought),Fyodor Dostoevsky,1981,A&C Black,4.09,Dostoevsky's portrayal of the Catholic Church during the Inquisition is a plea for the power of pure faith\, and a critique of the tyrannies of institutionalized religion. This is an excerpt from the Brothers Karamazov which stands alone as a statement of philosophy and a warning about the surrender of freedom for the sake of comfort. +68,8077,The unvanquished,William Faulkner,2011,Vintage,4.00,Set in Mississippi during the Civil War and Reconstruction\, THE UNVANQUISHED focuses on the Sartoris family\, who\, with their code of personal responsibility and courage\, stand for the best of the Old South's traditions. +69,8480,The wind's twelve quarters: Short stories,Ursula K. Le Guin,2017,HarperCollins,5.00,The recipient of numerous literary prizes\, including the National Book Award\, the Kafka Award\, and the Pushcart Prize\, Ursula K. Le Guin is renowned for her lyrical writing\, rich characters\, and diverse worlds. The Wind's Twelve Quarters collects seventeen powerful stories\, each with an introduction by the author\, ranging from fantasy to intriguing scientific concepts\, from medieval settings to the future. Including an insightful foreword by Le Guin\, describing her experience\, her inspirations\, and her approach to writing\, this stunning collection explores human values\, relationships\, and survival\, and showcases the myriad talents of one of the most provocative writers of our time.
+70,2847,To Love A Dark Stranger (Lovegram Historical Romance),Colleen Faulkner,1997,Zebra Books,5.00,Bestselling author Colleen Faulkner's tumultuous saga of royal intrigue and forbidden desire sweeps from the magnificent estates of the aristocracy to the shadowy streets of London to King Charles II's glittering Restoration court. +71,3293,Universe by Design,Danny Faulkner,2004,New Leaf Publishing Group,4.25,Views the stars and planets from a creationist standpoint\, addresses common misconceptions and difficulties about relativity and cosmology\, and discusses problems with the big bang theory with many analogies\, examples\, diagrams\, and illustrations. Original. +72,5327,War and Peace,Leo Tolstoy,2016,Lulu.com,3.84,Covers the period of the French invasion of Russia under Napoleon. Although not solely about the war itself\, the serialized novel covers the effects the war had on Russian society\, from the common person right up to the Tsar himself. Near the end the book moves toward a philosophical consideration of war and peace\, making it as a whole an important piece of literature. +73,4536,War and Peace (Signet Classics),[Leo Tolstoy, Pat Conroy, John Hockenberry],2012,Signet Classics,4.75,Presents the classical epic of the Napoleonic Wars and their effects on four Russian families. +74,9032,War and Peace: A Novel (6 Volumes),Tolstoy Leo,2013,Hardpress Publishing,3.81,Unlike some other reproductions of classic texts (1) We have not used OCR (Optical Character Recognition)\, as this leads to bad quality books with introduced typos. (2) In books where there are images such as portraits\, maps\, sketches\, etc.\, we have endeavoured to keep the quality of these images\, so they represent accurately the original artefact. Although occasionally there may be certain imperfections with these old texts\, we feel they deserve to be made available for future generations to enjoy. +75,5119,William Faulkner,William Faulkner,2011,Vintage,4.00,This invaluable volume\, which has been republished to commemorate the one-hundredth anniversary of Faulkner's birth\, contains some of the greatest short fiction by a writer who defined the course of American literature. Its forty-five stories fall into three categories: those not included in Faulkner's earlier collections\, previously unpublished short fiction\, and stories that were later expanded into such novels as The Unvanquished\, The Hamlet\, and Go Down\, Moses. With its Introduction and extensive notes by the biographer Joseph Blotner\, Uncollected Stories of William Faulkner is an essential addition to its author's canon--as well as a book of some of the most haunting\, harrowing\, and atmospheric short fiction written in the twentieth century. +76,8615,Winter notes on summer impressions,Fyodor Dostoevsky,2018,Alma Books,4.75,In June 1862\, Dostoevsky left Petersburg on his first excursion to Western Europe. Ostensibly making the trip to consult Western specialists about his epilepsy\, he also wished to see first-hand the source of the Western ideas he believed were corrupting Russia. Over the course of his journey he visited a number of major cities\, including Berlin\, Paris\, London\, Florence\, Milan and Vienna. His record of the trip\, Winter Notes on Summer Impressions - first published in the February 1863 issue of Vremya\, the periodical he edited - is the chrysalis out of which many elements of his later masterpieces developed.
+77,6478,Woman-The Full Story: A Dynamic Celebration of Freedoms,Michele Guinness,2003,Zondervan,5.00,What does it mean to be a woman today? What have women inherited from their radical\, risk-taking sisters of the past? And how does God view this half of humanity? Michele Guinness invites us on an adventure of discovery\, exploring the biblical texts\, the annals of history and the experiences of women today in search of the challenges and achievements\, failures and joys\, of women throughout the ages. +78,8678,Worlds of Exile and Illusion: Three Complete Novels of the Hainish Series in One Volume--Rocannon's World\, Planet of Exile\, City of Illusions,Ursula K. Le Guin,2016,Orb Books,4.41,Worlds of Exile and Illusion contains three novels in the Hainish Series from Ursula K. Le Guin\, one of the greatest science fiction writers and many times the winner of the Hugo and Nebula Awards. Her career as a novelist was launched by the three novels contained here. These books\, Rocannon's World\, Planet of Exile\, and City of Illusions\, are set in the same universe as Le Guin's groundbreaking classic\, The Left Hand of Darkness. At the Publisher's request\, this title is being sold without Digital Rights Management Software (DRM) applied. diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date_nanos.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date_nanos.csv-spec index f4b5c98d596ae..4206d6b48699f 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date_nanos.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date_nanos.csv-spec @@ -216,6 +216,137 @@ millis:date | nanos:date_nanos | num:long 2023-10-23T13:33:34.937Z | 2023-10-23T13:33:34.937193000Z | 1698068014937193000 ; +implicit casting to nanos, date only +required_capability: date_nanos_type +required_capability: date_nanos_implicit_casting + +FROM date_nanos +| WHERE MV_MIN(nanos) > "2023-10-23" +| SORT nanos DESC +| KEEP millis, nanos; + +millis:date | nanos:date_nanos +2023-10-23T13:55:01.543Z | 2023-10-23T13:55:01.543123456Z +2023-10-23T13:53:55.832Z | 2023-10-23T13:53:55.832987654Z +2023-10-23T13:52:55.015Z | 2023-10-23T13:52:55.015787878Z +2023-10-23T13:51:54.732Z | 2023-10-23T13:51:54.732102837Z +2023-10-23T13:33:34.937Z | 2023-10-23T13:33:34.937193000Z +2023-10-23T12:27:28.948Z | 2023-10-23T12:27:28.948000000Z +2023-10-23T12:15:03.360Z | 2023-10-23T12:15:03.360103847Z +2023-10-23T12:15:03.360Z | 2023-10-23T12:15:03.360103847Z +; + +implicit casting to nanos, date only, equality test +required_capability: date_nanos_type +required_capability: date_nanos_implicit_casting + +FROM date_nanos +| WHERE MV_MIN(nanos) == "2023-10-23" +| SORT nanos DESC +| KEEP millis, nanos; + +millis:date | nanos:date_nanos +; + + +implicit casting to nanos, date plus time to seconds +required_capability: date_nanos_type +required_capability: date_nanos_implicit_casting + +FROM date_nanos +| WHERE MV_MIN(nanos) > "2023-10-23T00:00:00" +| SORT nanos DESC +| KEEP millis, nanos; + +millis:date | nanos:date_nanos +2023-10-23T13:55:01.543Z | 2023-10-23T13:55:01.543123456Z +2023-10-23T13:53:55.832Z | 2023-10-23T13:53:55.832987654Z +2023-10-23T13:52:55.015Z | 2023-10-23T13:52:55.015787878Z +2023-10-23T13:51:54.732Z | 2023-10-23T13:51:54.732102837Z +2023-10-23T13:33:34.937Z | 2023-10-23T13:33:34.937193000Z +2023-10-23T12:27:28.948Z | 2023-10-23T12:27:28.948000000Z +2023-10-23T12:15:03.360Z | 2023-10-23T12:15:03.360103847Z +2023-10-23T12:15:03.360Z | 2023-10-23T12:15:03.360103847Z +; + +implicit 
casting to nanos, date plus time to seconds, equality test +required_capability: date_nanos_type +required_capability: date_nanos_implicit_casting + +FROM date_nanos +| WHERE MV_MIN(nanos) == "2023-10-23T12:27:28" +| SORT nanos DESC +| KEEP millis, nanos; + +millis:date | nanos:date_nanos +; + +implicit casting to nanos, date plus time to millis +required_capability: date_nanos_type +required_capability: date_nanos_implicit_casting + +FROM date_nanos +| WHERE MV_MIN(nanos) > "2023-10-23T00:00:00.000" +| SORT nanos DESC +| KEEP millis, nanos; + +millis:date | nanos:date_nanos +2023-10-23T13:55:01.543Z | 2023-10-23T13:55:01.543123456Z +2023-10-23T13:53:55.832Z | 2023-10-23T13:53:55.832987654Z +2023-10-23T13:52:55.015Z | 2023-10-23T13:52:55.015787878Z +2023-10-23T13:51:54.732Z | 2023-10-23T13:51:54.732102837Z +2023-10-23T13:33:34.937Z | 2023-10-23T13:33:34.937193000Z +2023-10-23T12:27:28.948Z | 2023-10-23T12:27:28.948000000Z +2023-10-23T12:15:03.360Z | 2023-10-23T12:15:03.360103847Z +2023-10-23T12:15:03.360Z | 2023-10-23T12:15:03.360103847Z +; + +implicit casting to nanos, date plus time to millis, equality test +required_capability: date_nanos_type +required_capability: date_nanos_implicit_casting + +FROM date_nanos +| WHERE MV_MIN(nanos) == "2023-10-23T12:27:28.948" +| SORT nanos DESC +| KEEP millis, nanos; + +millis:date | nanos:date_nanos +2023-10-23T12:27:28.948Z | 2023-10-23T12:27:28.948000000Z +; + +implicit casting to nanos, date plus time to nanos +required_capability: date_nanos_type +required_capability: date_nanos_implicit_casting + +FROM date_nanos +| WHERE MV_MIN(nanos) > "2023-10-23T00:00:00.000000000" +| SORT nanos DESC +| KEEP millis, nanos; + +millis:date | nanos:date_nanos +2023-10-23T13:55:01.543Z | 2023-10-23T13:55:01.543123456Z +2023-10-23T13:53:55.832Z | 2023-10-23T13:53:55.832987654Z +2023-10-23T13:52:55.015Z | 2023-10-23T13:52:55.015787878Z +2023-10-23T13:51:54.732Z | 2023-10-23T13:51:54.732102837Z +2023-10-23T13:33:34.937Z | 2023-10-23T13:33:34.937193000Z +2023-10-23T12:27:28.948Z | 2023-10-23T12:27:28.948000000Z +2023-10-23T12:15:03.360Z | 2023-10-23T12:15:03.360103847Z +2023-10-23T12:15:03.360Z | 2023-10-23T12:15:03.360103847Z +; + +implicit casting to nanos, date plus time to nanos, equality test +required_capability: date_nanos_type +required_capability: date_nanos_implicit_casting + +FROM date_nanos +| WHERE MV_MIN(nanos) == "2023-10-23T12:27:28.948000000" +| SORT nanos DESC +| KEEP millis, nanos; + +millis:date | nanos:date_nanos +2023-10-23T12:27:28.948Z | 2023-10-23T12:27:28.948000000Z +; + date nanos greater than millis required_capability: date_nanos_type required_capability: date_nanos_compare_to_millis @@ -555,7 +686,8 @@ required_capability: date_nanos_bucket FROM date_nanos | WHERE millis > "2020-01-01" -| STATS ct = count(*) BY yr = BUCKET(nanos, 1 year); +| STATS ct = count(*) BY yr = BUCKET(nanos, 1 year) +| SORT yr DESC; ct:long | yr:date_nanos 8 | 2023-01-01T00:00:00.000000000Z @@ -567,7 +699,8 @@ required_capability: date_nanos_bucket FROM date_nanos | WHERE millis > "2020-01-01" -| STATS ct = count(*) BY yr = BUCKET(nanos, 5, "1999-01-01", NOW()); +| STATS ct = count(*) BY yr = BUCKET(nanos, 5, "1999-01-01", NOW()) +| SORT yr DESC; ct:long | yr:date_nanos 8 | 2023-01-01T00:00:00.000000000Z @@ -579,7 +712,8 @@ required_capability: date_nanos_bucket FROM date_nanos | WHERE millis > "2020-01-01" -| STATS ct = count(*) BY mo = BUCKET(nanos, 1 month); +| STATS ct = count(*) BY mo = BUCKET(nanos, 1 month) +| SORT mo DESC; ct:long | mo:date_nanos 8 | 
2023-10-01T00:00:00.000000000Z @@ -591,7 +725,8 @@ required_capability: date_nanos_bucket FROM date_nanos | WHERE millis > "2020-01-01" -| STATS ct = count(*) BY mo = BUCKET(nanos, 20, "2023-01-01", "2023-12-31"); +| STATS ct = count(*) BY mo = BUCKET(nanos, 20, "2023-01-01", "2023-12-31") +| SORT mo DESC; ct:long | mo:date_nanos 8 | 2023-10-01T00:00:00.000000000Z @@ -603,18 +738,21 @@ required_capability: date_nanos_bucket FROM date_nanos | WHERE millis > "2020-01-01" -| STATS ct = count(*) BY mo = BUCKET(nanos, 55, "2023-01-01", "2023-12-31"); +| STATS ct = count(*) BY mo = BUCKET(nanos, 55, "2023-01-01", "2023-12-31") +| SORT mo DESC; ct:long | mo:date_nanos 8 | 2023-10-23T00:00:00.000000000Z ; + Bucket Date nanos by 10 minutes required_capability: date_trunc_date_nanos required_capability: date_nanos_bucket FROM date_nanos | WHERE millis > "2020-01-01" -| STATS ct = count(*) BY mn = BUCKET(nanos, 10 minutes); +| STATS ct = count(*) BY mn = BUCKET(nanos, 10 minutes) +| SORT mn DESC; ct:long | mn:date_nanos 4 | 2023-10-23T13:50:00.000000000Z diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/hash.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/hash.csv-spec new file mode 100644 index 0000000000000..fcac1e1859c6d --- /dev/null +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/hash.csv-spec @@ -0,0 +1,105 @@ +hash +required_capability: hash_function + +FROM sample_data +| WHERE message != "Connection error" +| EVAL md5 = hash("md5", message), sha256 = hash("sha256", message) +| KEEP message, md5, sha256; +ignoreOrder:true + +message:keyword | md5:keyword | sha256:keyword +Connected to 10.1.0.1 | abd7d1ce2bb636842a29246b3512dcae | 6d8372129ad78770f7185554dd39864749a62690216460752d6c075fa38ad85c +Connected to 10.1.0.2 | 8f8f1cb60832d153f5b9ec6dc828b93f | b0db24720f15857091b3c99f4c4833586d0ea3229911b8777efb8d917cf27e9a +Connected to 10.1.0.3 | 912b6dc13503165a15de43304bb77c78 | 75b0480188db8acc4d5cc666a51227eb2bc5b989cd8ca912609f33e0846eff57 +Disconnected | ef70e46fd3bbc21e3e1f0b6815e750c0 | 04dfac3671b494ad53fcd152f7a14511bfb35747278aad8ce254a0d6e4ba4718 +; + + +hashOfConvertedType +required_capability: hash_function + +FROM sample_data +| WHERE message != "Connection error" +| EVAL input = event_duration::STRING, md5 = hash("md5", input), sha256 = hash("sha256", input) +| KEEP message, input, md5, sha256; +ignoreOrder:true + +message:keyword | input:keyword | md5:keyword | sha256:keyword +Connected to 10.1.0.1 | 1756467 | c4fc1c57ee9b1d2b2023b70c8c167b54 | 8376a50a7ba7e6bd1bf9ad0c32d27d2f49fd0fa422573f98f239e21048b078f3 +Connected to 10.1.0.2 | 2764889 | 8e8cf005e11a7b5df1d9478a4715a444 | 1031f2bef8eaecbf47319505422300b27ea1f7c38b6717d41332325062f9a56a +Connected to 10.1.0.3 | 3450233 | 09f2c64f5a55e9edf8ffbad336b561d8 | f77d7545769c4ecc85092f4f0b7ec8c20f467e4beb15fe67ca29f9aa8e9a6900 +Disconnected | 1232382 | 6beac1485638d51e13c2c53990a2f611 | 9a03c1274a3ebb6c1cb85d170ce0a6fdb9d2232724e06b9f5e7cb9274af3cad6 +; + + +hashOfEmptyInput +required_capability: hash_function + +ROW input="" | EVAL md5 = hash("md5", input), sha256 = hash("sha256", input); + +input:keyword | md5:keyword | sha256:keyword + | d41d8cd98f00b204e9800998ecf8427e | e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 +; + +hashOfNullInput +required_capability: hash_function + +ROW input=null::STRING | EVAL md5 = hash("md5", input), sha256 = hash("sha256", input); + +input:keyword | md5:keyword | sha256:keyword +null | null | null +; + + +hashWithNullAlgorithm 
+required_capability: hash_function + +ROW input="input" | EVAL hash = hash(null, input); + +input:keyword | hash:keyword +input | null +; + + +hashWithMv +required_capability: hash_function + +ROW input=["foo", "bar"] | mv_expand input | EVAL md5 = hash("md5", input), sha256 = hash("sha256", input); + +input:keyword | md5:keyword | sha256:keyword +foo | acbd18db4cc2f85cedef654fccc4a4d8 | 2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae +bar | 37b51d194a7513e45b56f6524f2d51f2 | fcde2b2edba56bf408601fb721fe9b5c338d10ee429ea04fae5511b68fbf8fb9 +; + + +hashWithNestedFunctions +required_capability: hash_function + +ROW input=["foo", "bar"] | EVAL hash = concat(hash("md5", mv_concat(input, "-")), "-", hash("sha256", mv_concat(input, "-"))); + +input:keyword | hash:keyword +["foo", "bar"] | e5f9ec048d1dbe19c70f720e002f9cb1-7d89c4f517e3bd4b5e8e76687937005b602ea00c5cba3e25ef1fc6575a55103e +; + + +hashWithConvertedTypes +required_capability: hash_function + +ROW input=42 | EVAL md5 = hash("md5", input::STRING), sha256 = hash("sha256", to_string(input)); + +input:integer | md5:keyword | sha256:keyword +42 | a1d0c6e83f027327d8461063f4ac58a6 | 73475cb40a568e8da8a045ced110137e159f890ac4da883b6b17dc651b3a8049 +; + + +hashWithStats +required_capability: hash_function + +FROM sample_data +| EVAL md5="md5" +| STATS count = count(*) by hash(md5, message) +| WHERE count > 1; + +count:long | hash(md5, message):keyword +3 | 2e92ae79ff32b37fee4368a594792183 +; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/lookup-join.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/lookup-join.csv-spec index 8b8d24b1bb156..8bcc2c2ff3502 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/lookup-join.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/lookup-join.csv-spec @@ -8,7 +8,7 @@ ############################################### basicOnTheDataNode -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 FROM employees | EVAL language_code = languages @@ -25,7 +25,7 @@ emp_no:integer | language_code:integer | language_name:keyword ; basicRow -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 ROW language_code = 1 | LOOKUP JOIN languages_lookup ON language_code @@ -36,7 +36,7 @@ language_code:integer | language_name:keyword ; basicOnTheCoordinator -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 FROM employees | SORT emp_no @@ -53,7 +53,7 @@ emp_no:integer | language_code:integer | language_name:keyword ; subsequentEvalOnTheDataNode -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 FROM employees | EVAL language_code = languages @@ -71,7 +71,7 @@ emp_no:integer | language_code:integer | language_name:keyword | language_code_x ; subsequentEvalOnTheCoordinator -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 FROM employees | SORT emp_no @@ -89,7 +89,7 @@ emp_no:integer | language_code:integer | language_name:keyword | language_code_x ; sortEvalBeforeLookup -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 FROM employees | SORT emp_no @@ -106,7 +106,7 @@ emp_no:integer | language_code:integer | language_name:keyword ; nonUniqueLeftKeyOnTheDataNode -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 FROM employees | WHERE emp_no <= 10030 @@ -130,7 +130,7 @@ emp_no:integer | language_code:integer | language_name:keyword ; nonUniqueRightKeyOnTheDataNode -required_capability: join_lookup_v6 
+required_capability: join_lookup_v7 FROM employees | EVAL language_code = emp_no % 10 @@ -150,7 +150,7 @@ emp_no:integer | language_code:integer | language_name:keyword | country:k ; nonUniqueRightKeyOnTheCoordinator -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 FROM employees | SORT emp_no @@ -170,7 +170,7 @@ emp_no:integer | language_code:integer | language_name:keyword | country:k ; nonUniqueRightKeyFromRow -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 ROW language_code = 2 | LOOKUP JOIN languages_lookup_non_unique_key ON language_code @@ -186,8 +186,8 @@ language_code:integer | language_name:keyword | country:keyword # Filtering tests with languages_lookup index ############################################### -lookupWithFilterOnLeftSideField -required_capability: join_lookup_v6 +filterOnLeftSide +required_capability: join_lookup_v7 FROM employees | EVAL language_code = languages @@ -203,8 +203,8 @@ emp_no:integer | language_code:integer | language_name:keyword 10093 | 3 | Spanish ; -lookupMessageWithFilterOnRightSideField-Ignore -required_capability: join_lookup_v6 +filterOnRightSide +required_capability: join_lookup_v7 FROM sample_data | LOOKUP JOIN message_types_lookup ON message @@ -219,8 +219,8 @@ FROM sample_data 2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error | Error ; -lookupWithFieldAndRightSideAfterStats -required_capability: join_lookup_v6 +filterOnRightSideAfterStats +required_capability: join_lookup_v7 FROM sample_data | LOOKUP JOIN message_types_lookup ON message @@ -232,23 +232,110 @@ count:long | type:keyword 3 | Error ; -lookupWithFieldOnJoinKey-Ignore -required_capability: join_lookup_v6 +filterOnJoinKey +required_capability: join_lookup_v7 FROM employees | EVAL language_code = languages +| WHERE emp_no >= 10091 AND emp_no < 10094 +| LOOKUP JOIN languages_lookup ON language_code +| WHERE language_code == 1 +| KEEP emp_no, language_code, language_name +; + +emp_no:integer | language_code:integer | language_name:keyword +10092 | 1 | English +; + +filterOnJoinKeyAndRightSide +required_capability: join_lookup_v7 + +FROM employees +| WHERE emp_no < 10006 +| EVAL language_code = languages | LOOKUP JOIN languages_lookup ON language_code | WHERE language_code > 1 AND language_name IS NOT NULL | KEEP emp_no, language_code, language_name ; +ignoreOrder:true emp_no:integer | language_code:integer | language_name:keyword 10001 | 2 | French 10003 | 4 | German ; +filterOnRightSideOnTheCoordinator +required_capability: join_lookup_v7 + +FROM employees +| SORT emp_no +| LIMIT 5 +| EVAL language_code = languages +| LOOKUP JOIN languages_lookup ON language_code +| WHERE language_name == "English" +| KEEP emp_no, language_code, language_name +; + +emp_no:integer | language_code:integer | language_name:keyword +10005 | 1 | English +; + +filterOnJoinKeyOnTheCoordinator +required_capability: join_lookup_v7 + +FROM employees +| SORT emp_no +| LIMIT 5 +| EVAL language_code = languages +| LOOKUP JOIN languages_lookup ON language_code +| WHERE language_code == 1 +| KEEP emp_no, language_code, language_name +; + +emp_no:integer | language_code:integer | language_name:keyword +10005 | 1 | English +; + +filterOnJoinKeyAndRightSideOnTheCoordinator +required_capability: join_lookup_v7 + +FROM employees +| SORT emp_no +| LIMIT 5 +| EVAL language_code = languages +| LOOKUP JOIN languages_lookup ON language_code +| WHERE language_code > 1 AND language_name IS NOT NULL +| KEEP emp_no, language_code, language_name +; + 
+emp_no:integer | language_code:integer | language_name:keyword +10001 | 2 | French +10003 | 4 | German +; + +filterOnTheDataNodeThenFilterOnTheCoordinator +required_capability: join_lookup_v7 + +FROM employees +| EVAL language_code = languages +| WHERE emp_no >= 10091 AND emp_no < 10094 +| LOOKUP JOIN languages_lookup ON language_code +| WHERE language_name == "English" +| KEEP emp_no, language_code, language_name +| SORT emp_no +| WHERE language_code == 1 +; + +emp_no:integer | language_code:integer | language_name:keyword +10092 | 1 | English +; + +########################################################################### +# null and multi-value behavior with languages_lookup_non_unique_key index +########################################################################### + nullJoinKeyOnTheDataNode -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 FROM employees | WHERE emp_no < 10004 @@ -264,9 +351,8 @@ emp_no:integer | language_code:integer | language_name:keyword 10003 | null | null ; - mvJoinKeyOnTheDataNode -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 FROM employees | WHERE 10003 < emp_no AND emp_no < 10008 @@ -284,7 +370,7 @@ emp_no:integer | language_code:integer | language_name:keyword ; mvJoinKeyFromRow -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 ROW language_code = [4, 5, 6, 7] | LOOKUP JOIN languages_lookup_non_unique_key ON language_code @@ -297,7 +383,7 @@ language_code:integer | language_name:keyword | country:keyword ; mvJoinKeyFromRowExpanded -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 ROW language_code = [4, 5, 6, 7, 8] | MV_EXPAND language_code @@ -319,7 +405,7 @@ language_code:integer | language_name:keyword | country:keyword ############################################### lookupIPFromRow -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 ROW left = "left", client_ip = "172.21.0.5", right = "right" | LOOKUP JOIN clientips_lookup ON client_ip @@ -330,7 +416,7 @@ left | 172.21.0.5 | right | Development ; lookupIPFromKeepRow -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 ROW left = "left", client_ip = "172.21.0.5", right = "right" | KEEP left, client_ip, right @@ -342,7 +428,7 @@ left | 172.21.0.5 | right | Development ; lookupIPFromRowWithShadowing -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 ROW left = "left", client_ip = "172.21.0.5", env = "env", right = "right" | LOOKUP JOIN clientips_lookup ON client_ip @@ -353,7 +439,7 @@ left | 172.21.0.5 | right | Development ; lookupIPFromRowWithShadowingKeep -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 ROW left = "left", client_ip = "172.21.0.5", env = "env", right = "right" | EVAL client_ip = client_ip::keyword @@ -366,7 +452,7 @@ left | 172.21.0.5 | right | Development ; lookupIPFromRowWithShadowingKeepReordered -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 ROW left = "left", client_ip = "172.21.0.5", env = "env", right = "right" | EVAL client_ip = client_ip::keyword @@ -379,7 +465,7 @@ right | Development | 172.21.0.5 ; lookupIPFromIndex -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 FROM sample_data | EVAL client_ip = client_ip::keyword @@ -398,7 +484,7 @@ ignoreOrder:true ; lookupIPFromIndexKeep -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 FROM sample_data | EVAL client_ip = client_ip::keyword @@ -418,7 +504,7 @@ 
ignoreOrder:true ; lookupIPFromIndexKeepKeep -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 FROM sample_data | KEEP client_ip, event_duration, @timestamp, message @@ -440,7 +526,7 @@ timestamp:date | client_ip:keyword | event_duration:long | msg:keyword ; lookupIPFromIndexStats -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 FROM sample_data | EVAL client_ip = client_ip::keyword @@ -456,7 +542,7 @@ count:long | env:keyword ; lookupIPFromIndexStatsKeep -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 FROM sample_data | EVAL client_ip = client_ip::keyword @@ -473,7 +559,7 @@ count:long | env:keyword ; statsAndLookupIPFromIndex -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 FROM sample_data | EVAL client_ip = client_ip::keyword @@ -494,7 +580,7 @@ count:long | client_ip:keyword | env:keyword ############################################### lookupMessageFromRow -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 ROW left = "left", message = "Connected to 10.1.0.1", right = "right" | LOOKUP JOIN message_types_lookup ON message @@ -505,7 +591,7 @@ left | Connected to 10.1.0.1 | right | Success ; lookupMessageFromKeepRow -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 ROW left = "left", message = "Connected to 10.1.0.1", right = "right" | KEEP left, message, right @@ -517,7 +603,7 @@ left | Connected to 10.1.0.1 | right | Success ; lookupMessageFromRowWithShadowing -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 ROW left = "left", message = "Connected to 10.1.0.1", type = "unknown", right = "right" | LOOKUP JOIN message_types_lookup ON message @@ -528,7 +614,7 @@ left | Connected to 10.1.0.1 | right | Success ; lookupMessageFromRowWithShadowingKeep -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 ROW left = "left", message = "Connected to 10.1.0.1", type = "unknown", right = "right" | LOOKUP JOIN message_types_lookup ON message @@ -540,7 +626,7 @@ left | Connected to 10.1.0.1 | right | Success ; lookupMessageFromIndex -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 FROM sample_data | LOOKUP JOIN message_types_lookup ON message @@ -558,7 +644,7 @@ ignoreOrder:true ; lookupMessageFromIndexKeep -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 FROM sample_data | LOOKUP JOIN message_types_lookup ON message @@ -577,7 +663,7 @@ ignoreOrder:true ; lookupMessageFromIndexKeepKeep -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 FROM sample_data | KEEP client_ip, event_duration, @timestamp, message @@ -597,7 +683,7 @@ ignoreOrder:true ; lookupMessageFromIndexKeepReordered -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 FROM sample_data | LOOKUP JOIN message_types_lookup ON message @@ -616,7 +702,7 @@ Success | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 ; lookupMessageFromIndexStats -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 FROM sample_data | LOOKUP JOIN message_types_lookup ON message @@ -631,7 +717,7 @@ count:long | type:keyword ; lookupMessageFromIndexStatsKeep -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 FROM sample_data | LOOKUP JOIN message_types_lookup ON message @@ -647,7 +733,7 @@ count:long | type:keyword ; statsAndLookupMessageFromIndex -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 FROM sample_data | STATS 
count = count(message) BY message @@ -665,7 +751,7 @@ count:long | type:keyword | message:keyword ; lookupMessageFromIndexTwice -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 FROM sample_data | LOOKUP JOIN message_types_lookup ON message @@ -687,7 +773,7 @@ ignoreOrder:true ; lookupMessageFromIndexTwiceKeep -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 FROM sample_data | LOOKUP JOIN message_types_lookup ON message @@ -714,7 +800,7 @@ ignoreOrder:true ############################################### lookupIPAndMessageFromRow -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 ROW left = "left", client_ip = "172.21.0.5", message = "Connected to 10.1.0.1", right = "right" | LOOKUP JOIN clientips_lookup ON client_ip @@ -726,7 +812,7 @@ left | 172.21.0.5 | Connected to 10.1.0.1 | right | Devel ; lookupIPAndMessageFromRowKeepBefore -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 ROW left = "left", client_ip = "172.21.0.5", message = "Connected to 10.1.0.1", right = "right" | KEEP left, client_ip, message, right @@ -739,7 +825,7 @@ left | 172.21.0.5 | Connected to 10.1.0.1 | right | Devel ; lookupIPAndMessageFromRowKeepBetween -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 ROW left = "left", client_ip = "172.21.0.5", message = "Connected to 10.1.0.1", right = "right" | LOOKUP JOIN clientips_lookup ON client_ip @@ -752,7 +838,7 @@ left | 172.21.0.5 | Connected to 10.1.0.1 | right | Devel ; lookupIPAndMessageFromRowKeepAfter -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 ROW left = "left", client_ip = "172.21.0.5", message = "Connected to 10.1.0.1", right = "right" | LOOKUP JOIN clientips_lookup ON client_ip @@ -765,7 +851,7 @@ left | 172.21.0.5 | Connected to 10.1.0.1 | right | Devel ; lookupIPAndMessageFromRowWithShadowing -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 ROW left = "left", client_ip = "172.21.0.5", message = "Connected to 10.1.0.1", env = "env", type = "type", right = "right" | LOOKUP JOIN clientips_lookup ON client_ip @@ -777,7 +863,7 @@ left | 172.21.0.5 | Connected to 10.1.0.1 | right | Devel ; lookupIPAndMessageFromRowWithShadowingKeep -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 ROW left = "left", client_ip = "172.21.0.5", message = "Connected to 10.1.0.1", env = "env", right = "right" | EVAL client_ip = client_ip::keyword @@ -791,7 +877,7 @@ left | 172.21.0.5 | Connected to 10.1.0.1 | right | Devel ; lookupIPAndMessageFromRowWithShadowingKeepKeep -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 ROW left = "left", client_ip = "172.21.0.5", message = "Connected to 10.1.0.1", env = "env", right = "right" | EVAL client_ip = client_ip::keyword @@ -806,7 +892,7 @@ left | 172.21.0.5 | Connected to 10.1.0.1 | right | Devel ; lookupIPAndMessageFromRowWithShadowingKeepKeepKeep -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 ROW left = "left", client_ip = "172.21.0.5", message = "Connected to 10.1.0.1", env = "env", right = "right" | EVAL client_ip = client_ip::keyword @@ -822,7 +908,7 @@ left | 172.21.0.5 | Connected to 10.1.0.1 | right | Devel ; lookupIPAndMessageFromRowWithShadowingKeepReordered -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 ROW left = "left", client_ip = "172.21.0.5", message = "Connected to 10.1.0.1", env = "env", right = "right" | EVAL client_ip = client_ip::keyword @@ -836,7 
+922,7 @@ right | Development | Success | 172.21.0.5 ; lookupIPAndMessageFromIndex -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 FROM sample_data | EVAL client_ip = client_ip::keyword @@ -856,7 +942,7 @@ ignoreOrder:true ; lookupIPAndMessageFromIndexKeep -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 FROM sample_data | EVAL client_ip = client_ip::keyword @@ -877,7 +963,7 @@ ignoreOrder:true ; lookupIPAndMessageFromIndexStats -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 FROM sample_data | EVAL client_ip = client_ip::keyword @@ -895,7 +981,7 @@ count:long | env:keyword | type:keyword ; lookupIPAndMessageFromIndexStatsKeep -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 FROM sample_data | EVAL client_ip = client_ip::keyword @@ -914,7 +1000,7 @@ count:long | env:keyword | type:keyword ; statsAndLookupIPAndMessageFromIndex -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 FROM sample_data | EVAL client_ip = client_ip::keyword @@ -933,7 +1019,7 @@ count:long | client_ip:keyword | message:keyword | env:keyword | type:keyw ; lookupIPAndMessageFromIndexChainedEvalKeep -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 FROM sample_data | EVAL client_ip = client_ip::keyword @@ -955,7 +1041,7 @@ ignoreOrder:true ; lookupIPAndMessageFromIndexChainedRenameKeep -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 FROM sample_data | EVAL client_ip = client_ip::keyword diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/match-function.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/match-function.csv-spec index 03b24555dbeff..5ea169e1b110d 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/match-function.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/match-function.csv-spec @@ -115,6 +115,80 @@ book_no:keyword | title:text 7140 |The Lord of the Rings Poster Collection: Six Paintings by Alan Lee (No. 
1) ; +matchWithDisjunction +required_capability: match_function +required_capability: full_text_functions_disjunctions + +from books +| where match(author, "Vonnegut") or match(author, "Guinane") +| keep book_no, author; +ignoreOrder:true + +book_no:keyword | author:text +2464 | Kurt Vonnegut +6970 | Edith Vonnegut +8956 | Kurt Vonnegut +3950 | Kurt Vonnegut +4382 | Carole Guinane +; + +matchWithDisjunctionAndFiltersConjunction +required_capability: match_function +required_capability: full_text_functions_disjunctions + +from books +| where (match(author, "Vonnegut") or match(author, "Guinane")) and year > 1997 +| keep book_no, author, year; +ignoreOrder:true + +book_no:keyword | author:text | year:integer +6970 | Edith Vonnegut | 1998 +4382 | Carole Guinane | 2001 +; + +matchWithDisjunctionAndConjunction +required_capability: match_function +required_capability: full_text_functions_disjunctions + +from books +| where (match(author, "Vonnegut") or match(author, "Marquez")) and match(description, "realism") +| keep book_no; + +book_no:keyword +4814 +; + +matchWithMoreComplexDisjunctionAndConjunction +required_capability: match_function +required_capability: full_text_functions_disjunctions + +from books +| where (match(author, "Vonnegut") and match(description, "charming")) or (match(author, "Marquez") and match(description, "realism")) +| keep book_no; +ignoreOrder:true + +book_no:keyword +6970 +4814 +; + +matchWithDisjunctionIncludingConjunction +required_capability: match_function +required_capability: full_text_functions_disjunctions + +from books +| where match(author, "Vonnegut") or (match(author, "Marquez") and match(description, "realism")) +| keep book_no; +ignoreOrder:true + +book_no:keyword +2464 +6970 +4814 +8956 +3950 +; + matchWithFunctionPushedToLucene required_capability: match_function diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/match-operator.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/match-operator.csv-spec index 56f7f5ccd8823..7906f8b69162b 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/match-operator.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/match-operator.csv-spec @@ -102,6 +102,81 @@ book_no:keyword | title:text 7140 |The Lord of the Rings Poster Collection: Six Paintings by Alan Lee (No. 
1) ; + +matchWithDisjunction +required_capability: match_operator_colon +required_capability: full_text_functions_disjunctions + +from books +| where author : "Vonnegut" or author : "Guinane" +| keep book_no, author; +ignoreOrder:true + +book_no:keyword | author:text +2464 | Kurt Vonnegut +6970 | Edith Vonnegut +8956 | Kurt Vonnegut +3950 | Kurt Vonnegut +4382 | Carole Guinane +; + +matchWithDisjunctionAndFiltersConjunction +required_capability: match_operator_colon +required_capability: full_text_functions_disjunctions + +from books +| where (author : "Vonnegut" or author : "Guinane") and year > 1997 +| keep book_no, author, year; +ignoreOrder:true + +book_no:keyword | author:text | year:integer +6970 | Edith Vonnegut | 1998 +4382 | Carole Guinane | 2001 +; + +matchWithDisjunctionAndConjunction +required_capability: match_operator_colon +required_capability: full_text_functions_disjunctions + +from books +| where (author : "Vonnegut" or author : "Marquez") and description : "realism" +| keep book_no; + +book_no:keyword +4814 +; + +matchWithMoreComplexDisjunctionAndConjunction +required_capability: match_operator_colon +required_capability: full_text_functions_disjunctions + +from books +| where (author : "Vonnegut" and description : "charming") or (author : "Marquez" and description : "realism") +| keep book_no; +ignoreOrder:true + +book_no:keyword +6970 +4814 +; + +matchWithDisjunctionIncludingConjunction +required_capability: match_operator_colon +required_capability: full_text_functions_disjunctions + +from books +| where author : "Vonnegut" or (author : "Marquez" and description : "realism") +| keep book_no; +ignoreOrder:true + +book_no:keyword +2464 +6970 +4814 +8956 +3950 +; + matchWithFunctionPushedToLucene required_capability: match_operator_colon @@ -219,7 +294,7 @@ count(*): long | author.keyword:keyword ; testMatchBooleanField -required_capability: match_function +required_capability: match_operator_colon required_capability: match_additional_types from employees @@ -235,7 +310,7 @@ Amabile | true | 2.09 ; testMatchIntegerField -required_capability: match_function +required_capability: match_operator_colon required_capability: match_additional_types from employees @@ -247,7 +322,7 @@ emp_no:integer | first_name:keyword ; testMatchDoubleField -required_capability: match_function +required_capability: match_operator_colon required_capability: match_additional_types from employees @@ -259,7 +334,7 @@ emp_no:integer | salary_change:double ; testMatchLongField -required_capability: match_function +required_capability: match_operator_colon required_capability: match_additional_types from date_nanos @@ -271,7 +346,7 @@ num:long ; testMatchUnsignedLongField -required_capability: match_function +required_capability: match_operator_colon required_capability: match_additional_types from ul_logs @@ -283,7 +358,7 @@ bytes_out:unsigned_long ; testMatchIpFieldAsString -required_capability: match_function +required_capability: match_operator_colon required_capability: match_additional_types from sample_data @@ -295,7 +370,7 @@ client_ip:ip | message:keyword ; testMatchDateFieldAsString -required_capability: match_function +required_capability: match_operator_colon required_capability: match_additional_types from date_nanos @@ -307,7 +382,7 @@ millis:date ; testMatchDateNanosFieldAsString -required_capability: match_function +required_capability: match_operator_colon required_capability: match_additional_types from date_nanos @@ -319,7 +394,7 @@ nanos:date_nanos ; testMatchBooleanFieldAsString
-required_capability: match_function +required_capability: match_operator_colon required_capability: match_additional_types from employees @@ -335,7 +410,7 @@ Amabile | true | 2.09 ; testMatchIntegerFieldAsString -required_capability: match_function +required_capability: match_operator_colon required_capability: match_additional_types from employees @@ -347,7 +422,7 @@ emp_no:integer | first_name:keyword ; testMatchDoubleFieldAsString -required_capability: match_function +required_capability: match_operator_colon required_capability: match_additional_types from employees @@ -359,7 +434,7 @@ emp_no:integer | salary_change:double ; testMatchLongFieldAsString -required_capability: match_function +required_capability: match_operator_colon required_capability: match_additional_types from date_nanos @@ -371,7 +446,7 @@ num:long ; testMatchUnsignedLongFieldAsString -required_capability: match_function +required_capability: match_operator_colon required_capability: match_additional_types from ul_logs @@ -383,7 +458,7 @@ bytes_out:unsigned_long ; testMatchVersionFieldAsString -required_capability: match_function +required_capability: match_operator_colon required_capability: match_additional_types from apps @@ -395,7 +470,7 @@ bbbbb | 2.1 ; testMatchIntegerAsDouble -required_capability: match_function +required_capability: match_operator_colon required_capability: match_additional_types from employees @@ -408,7 +483,7 @@ emp_no:integer | first_name:keyword ; testMatchDoubleAsIntegerField -required_capability: match_function +required_capability: match_operator_colon required_capability: match_additional_types from employees @@ -423,7 +498,7 @@ emp_no:integer | height:double ; testMatchMultipleFieldTypes -required_capability: match_function +required_capability: match_operator_colon required_capability: match_additional_types from employees,employees_incompatible @@ -440,7 +515,7 @@ emp_as_int:integer | name_as_kw:keyword testMatchMultipleFieldTypesKeywordText -required_capability: match_function +required_capability: match_operator_colon required_capability: match_additional_types from employees,employees_incompatible @@ -455,7 +530,7 @@ Kazuhito ; testMatchMultipleFieldTypesDoubleFloat -required_capability: match_function +required_capability: match_operator_colon required_capability: match_additional_types from employees,employees_incompatible @@ -474,7 +549,7 @@ emp_no:integer | height_dbl:double ; testMatchMultipleFieldTypesBooleanKeyword -required_capability: match_function +required_capability: match_operator_colon required_capability: match_additional_types from employees,employees_incompatible @@ -491,7 +566,7 @@ true ; testMatchMultipleFieldTypesLongUnsignedLong -required_capability: match_function +required_capability: match_operator_colon required_capability: match_additional_types from employees,employees_incompatible @@ -506,7 +581,7 @@ avg_worked_seconds_ul:unsigned_long ; testMatchMultipleFieldTypesDateNanosDate -required_capability: match_function +required_capability: match_operator_colon required_capability: match_additional_types from employees,employees_incompatible @@ -521,7 +596,7 @@ hire_date_nanos:date_nanos ; testMatchWithWrongFieldValue -required_capability: match_function +required_capability: match_operator_colon required_capability: match_additional_types from employees,employees_incompatible diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/scoring.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/scoring.csv-spec index 
cb38204a71ab0..72632c62603aa 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/scoring.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/scoring.csv-spec @@ -13,9 +13,9 @@ from books metadata _score | sort c_score desc, book_no asc | LIMIT 2; -book_no:keyword | title:text | c_score:double -2675 | The Lord of the Rings - Boxed Set | 6.0 -4023 | A Tolkien Compass: Including J. R. R. Tolkien's Guide to the Names in The Lord of the Rings | 6.0 +book_no:keyword | title:text | c_score:double +1463 | Realms of Tolkien: Images of Middle-earth | 6.0 +2675 | The Lord of the Rings - Boxed Set | 6.0 ; singleMatchWithKeywordFieldScoring @@ -28,15 +28,15 @@ from books metadata _score | sort book_no; book_no:keyword | author:text | _score:double -2713 | William Faulkner | 2.3142893314361572 -2883 | William Faulkner | 2.3142893314361572 -4724 | William Faulkner | 2.3142893314361572 -4977 | William Faulkner | 2.3142893314361572 -5119 | William Faulkner | 2.3142893314361572 -5404 | William Faulkner | 2.3142893314361572 -5578 | William Faulkner | 2.3142893314361572 -8077 | William Faulkner | 2.3142893314361572 -9896 | William Faulkner | 2.3142893314361572 +2713 | William Faulkner | 1.7589385509490967 +2883 | William Faulkner | 1.7589385509490967 +4724 | William Faulkner | 1.7589385509490967 +4977 | William Faulkner | 2.6145541667938232 +5119 | William Faulkner | 2.513157367706299 +5404 | William Faulkner | 1.7589385509490967 +5578 | William Faulkner | 2.513157367706299 +8077 | William Faulkner | 1.7589385509490967 +9896 | William Faulkner | 2.6145541667938232 ; qstrWithFieldAndScoringSortedEval @@ -51,9 +51,9 @@ from books metadata _score | limit 3; book_no:keyword | title:text | _score:double -2675 | The Lord of the Rings - Boxed Set | 2.7583377361297607 -7140 | The Lord of the Rings Poster Collection: Six Paintings by Alan Lee (No. 1) | 1.9239964485168457 -2714 | Return of the King Being the Third Part of The Lord of the Rings | 1.9239964485168457 +2675 | The Lord of the Rings - Boxed Set | 2.5619282722473145 +2714 | Return of the King Being the Third Part of The Lord of the Rings | 1.9245924949645996 +7140 | The Lord of the Rings Poster Collection: Six Paintings by Alan Lee (No. 1) | 1.746896743774414 ; qstrWithFieldAndScoringSorted @@ -67,9 +67,9 @@ from books metadata _score | limit 3; book_no:keyword | title:text | _score:double -2675 | The Lord of the Rings - Boxed Set | 2.7583377361297607 -7140 | The Lord of the Rings Poster Collection: Six Paintings by Alan Lee (No. 1) | 1.9239964485168457 -2714 | Return of the King Being the Third Part of The Lord of the Rings | 1.9239964485168457 +2675 | The Lord of the Rings - Boxed Set | 2.5619282722473145 +2714 | Return of the King Being the Third Part of The Lord of the Rings | 1.9245924949645996 +7140 | The Lord of the Rings Poster Collection: Six Paintings by Alan Lee (No. 1) | 1.746896743774414 ; singleQstrScoringManipulated @@ -84,8 +84,8 @@ from books metadata _score | LIMIT 2; book_no:keyword | author:text | add_score:double -2378 | [Carol Faulkner, Holly Byers Ochoa, Lucretia Mott] | 2.0 -2713 | William Faulkner | 7.0 +2378 | [Carol Faulkner, Holly Byers Ochoa, Lucretia Mott] | 3.0 +2713 | William Faulkner | 6.0 ; testMultiValuedFieldWithConjunctionWithScore @@ -125,7 +125,7 @@ from books metadata _score ignoreOrder:true book_no:keyword | title:text | author:text | _score:double -8480 | The wind's twelve quarters: Short stories | Ursula K. 
Le Guin | 14.489097595214844 +8480 | The wind's twelve quarters: Short stories | Ursula K. Le Guin | 11.193471908569336 ; multipleWhereWithMatchScoring @@ -139,7 +139,7 @@ from books metadata _score | sort book_no; book_no:keyword | title:text | author:text | _score:double -8480 | The wind's twelve quarters: Short stories | Ursula K. Le Guin | 14.489097595214844 +8480 | The wind's twelve quarters: Short stories | Ursula K. Le Guin | 11.193471908569336 ; combinedMatchWithFunctionsScoring @@ -153,7 +153,7 @@ from books metadata _score | sort book_no; book_no:keyword | title:text | author:text | year:integer | _score:double -5335 | Letters of J R R Tolkien | J.R.R. Tolkien | 2014 | 5.448054313659668 +5335 | Letters of J R R Tolkien | J.R.R. Tolkien | 2014 | 4.733664035797119 ; singleQstrScoring @@ -167,8 +167,8 @@ from books metadata _score | LIMIT 2; book_no:keyword | author:text | _score:double -2378 | [Carol Faulkner, Holly Byers Ochoa, Lucretia Mott] | 0.9976131916046143 -2713 | William Faulkner | 5.9556169509887695 +2378 | [Carol Faulkner, Holly Byers Ochoa, Lucretia Mott] | 1.3697924613952637 +2713 | William Faulkner | 4.631696701049805 ; singleQstrScoringGrok @@ -183,9 +183,9 @@ from books metadata _score | LIMIT 3; book_no:keyword | title:keyword | _score:double -8875 | The | 2.9505908489227295 -4023 | A | 2.8327860832214355 -2675 | The | 2.7583377361297607 +8875 | The | 2.769660472869873 +1463 | Realms | 2.6714818477630615 +2675 | The | 2.5619282722473145 ; combinedMatchWithScoringEvalNoSort @@ -200,7 +200,7 @@ from books metadata _score ignoreOrder:true book_no:keyword | title:text | author:text | year:integer | c_score:double -5335 | Letters of J R R Tolkien | J.R.R. Tolkien | 2014 | 6 +5335 | Letters of J R R Tolkien | J.R.R. Tolkien | 2014 | 5.0 ; singleQstrScoringRename @@ -215,9 +215,9 @@ from books metadata _score | LIMIT 3; book_no:keyword | rank:double -8875 | 2.9505908489227295 -4023 | 2.8327860832214355 -2675 | 2.7583377361297607 +8875 | 2.769660472869873 +1463 | 2.6714818477630615 +2675 | 2.5619282722473145 ; singleMatchWithTextFieldScoring @@ -231,11 +231,11 @@ from books metadata _score | limit 5; book_no:keyword | author:text | _score:double -2378 | [Carol Faulkner, Holly Byers Ochoa, Lucretia Mott] | 0.9976131916046143 -2713 | William Faulkner | 4.272439002990723 -2847 | Colleen Faulkner | 1.7401835918426514 -2883 | William Faulkner | 4.272439002990723 -3293 | Danny Faulkner | 1.7401835918426514 +2378 | [Carol Faulkner, Holly Byers Ochoa, Lucretia Mott] | 1.3697924613952637 +2713 | William Faulkner | 3.2750158309936523 +2847 | Colleen Faulkner | 1.593343734741211 +2883 | William Faulkner | 3.2750158309936523 +3293 | Danny Faulkner | 1.593343734741211 ; combinedMatchWithFunctionsScoringNoSort @@ -249,7 +249,7 @@ from books metadata _score ignoreOrder:true book_no:keyword | title:text | author:text | year:integer | _score:double -5335 | Letters of J R R Tolkien | J.R.R. Tolkien | 2014 | 5.448054313659668 +5335 | Letters of J R R Tolkien | J.R.R. Tolkien | 2014 | 4.733664035797119 ; combinedMatchWithScoringEval @@ -264,7 +264,7 @@ from books metadata _score | sort book_no; book_no:keyword | title:text | author:text | year:integer | c_score:double -5335 | Letters of J R R Tolkien | J.R.R. Tolkien | 2014 | 6 +5335 | Letters of J R R Tolkien | J.R.R. 
Tolkien | 2014 | 5.0 ; singleQstrScoringEval @@ -280,7 +280,7 @@ from books metadata _score book_no:keyword | c_score:double 8875 | 3.0 -7350 | 2.0 +7350 | 1.0 7140 | 3.0 ; @@ -289,14 +289,16 @@ required_capability: metadata_score required_capability: qstr_function from books metadata _score -| where qstr("title:rings") +| where qstr("title:gentle") | eval _score = _score + 1 | keep book_no, title, _score -| limit 2; +| limit 10 +; +ignoreOrder:true -book_no:keyword | title:text | _score:double -4023 | A Tolkien Compass: Including J. R. R. Tolkien's Guide to the Names in The Lord of the Rings | 2.6404519081115723 -2714 | Return of the King Being the Third Part of The Lord of the Rings | 2.9239964485168457 +book_no:keyword | title:text | _score:double +2924 | A Gentle Creature and Other Stories: White Nights, A Gentle Creature, and The Dream of a Ridiculous Man (The World's Classics) | 3.158426523208618 +5948 | That We Are Gentle Creatures | 3.727346897125244 ; QstrScoreOverride @@ -304,12 +306,14 @@ required_capability: metadata_score required_capability: qstr_function from books metadata _score -| where qstr("title:rings") +| where qstr("title:gentle") | eval _score = "foobar" | keep book_no, title, _score -| limit 2; +| limit 10 +; +ignoreOrder:true -book_no:keyword | title:text | _score:keyword -4023 | A Tolkien Compass: Including J. R. R. Tolkien's Guide to the Names in The Lord of the Rings | foobar -2714 | Return of the King Being the Third Part of The Lord of the Rings | foobar +book_no:keyword | title:text | _score:keyword +2924 | A Gentle Creature and Other Stories: White Nights, A Gentle Creature, and The Dream of a Ridiculous Man (The World's Classics) | foobar +5948 | That We Are Gentle Creatures | foobar ; diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/plugin/MatchFunctionIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/plugin/MatchFunctionIT.java index 58b1652653ca3..ad90bbf6ae9db 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/plugin/MatchFunctionIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/plugin/MatchFunctionIT.java @@ -168,7 +168,7 @@ public void testWhereMatchWithScoringNoSort() { var query = """ FROM test METADATA _score - | WHERE content:"fox" + | WHERE match(content, "fox") | KEEP id, _score """; @@ -182,7 +182,7 @@ public void testWhereMatchWithScoringNoSort() { public void testNonExistingColumn() { var query = """ FROM test - | WHERE something:"fox" + | WHERE match(something, "fox") """; var error = expectThrows(VerificationException.class, () -> run(query)); @@ -193,14 +193,14 @@ public void testWhereMatchEvalColumn() { var query = """ FROM test | EVAL upper_content = to_upper(content) - | WHERE upper_content:"FOX" + | WHERE match(upper_content, "FOX") | KEEP id """; var error = expectThrows(VerificationException.class, () -> run(query)); assertThat( error.getMessage(), - containsString("[:] operator cannot operate on [upper_content], which is not a field from an index mapping") + containsString("[MATCH] function cannot operate on [upper_content], which is not a field from an index mapping") ); } @@ -209,13 +209,13 @@ public void testWhereMatchOverWrittenColumn() { FROM test | DROP content | EVAL content = CONCAT("document with ID ", to_str(id)) - | WHERE content:"document" + | WHERE match(content, "document") """; var error = expectThrows(VerificationException.class, () -> run(query)); assertThat( 
error.getMessage(), - containsString("[:] operator cannot operate on [content], which is not a field from an index mapping") + containsString("[MATCH] function cannot operate on [content], which is not a field from an index mapping") ); } @@ -223,7 +223,7 @@ public void testWhereMatchAfterStats() { var query = """ FROM test | STATS count(*) - | WHERE content:"fox" + | WHERE match(content, "fox") """; var error = expectThrows(VerificationException.class, () -> run(query)); @@ -233,14 +233,15 @@ public void testWhereMatchAfterStats() { public void testWhereMatchWithFunctions() { var query = """ FROM test - | WHERE content:"fox" OR to_upper(content) == "FOX" + | WHERE match(content, "fox") OR to_upper(content) == "FOX" """; var error = expectThrows(ElasticsearchException.class, () -> run(query)); assertThat( error.getMessage(), containsString( - "Invalid condition [content:\"fox\" OR to_upper(content) == \"FOX\"]. " - + "[:] operator can't be used as part of an or condition" + "Invalid condition [match(content, \"fox\") OR to_upper(content) == \"FOX\"]. " + + "Full text functions can be used in an OR condition," + + " but only if just full text functions are used in the OR condition" ) ); } @@ -248,24 +249,24 @@ public void testWhereMatchWithFunctions() { public void testWhereMatchWithRow() { var query = """ ROW content = "a brown fox" - | WHERE content:"fox" + | WHERE match(content, "fox") """; var error = expectThrows(ElasticsearchException.class, () -> run(query)); assertThat( error.getMessage(), - containsString("[:] operator cannot operate on [\"a brown fox\"], which is not a field from an index mapping") + containsString("[MATCH] function cannot operate on [\"a brown fox\"], which is not a field from an index mapping") ); } public void testMatchWithinEval() { var query = """ FROM test - | EVAL matches_query = content:"fox" + | EVAL matches_query = match(content, "fox") """; var error = expectThrows(VerificationException.class, () -> run(query)); - assertThat(error.getMessage(), containsString("[:] operator is only supported in WHERE commands")); + assertThat(error.getMessage(), containsString("[MATCH] function is only supported in WHERE commands")); } private void createAndPopulateIndex() { diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/plugin/MatchOperatorIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/plugin/MatchOperatorIT.java index d0a641f086fe4..758878b46d51f 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/plugin/MatchOperatorIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/plugin/MatchOperatorIT.java @@ -216,7 +216,8 @@ public void testWhereMatchWithFunctions() { error.getMessage(), containsString( "Invalid condition [content:\"fox\" OR to_upper(content) == \"FOX\"]. 
" - + "[:] operator can't be used as part of an or condition" + + "Full text functions can be used in an OR condition, " + + "but only if just full text functions are used in the OR condition" ) ); } diff --git a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/HashConstantEvaluator.java b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/HashConstantEvaluator.java new file mode 100644 index 0000000000000..34cff73018634 --- /dev/null +++ b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/HashConstantEvaluator.java @@ -0,0 +1,142 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.xpack.esql.expression.function.scalar.string; + +import java.lang.IllegalArgumentException; +import java.lang.Override; +import java.lang.String; +import java.util.function.Function; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.BytesRefVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.BreakingBytesRefBuilder; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.compute.operator.Warnings; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.xpack.esql.core.tree.Source; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link Hash}. + * This class is generated. Do not edit it. 
+ */ +public final class HashConstantEvaluator implements EvalOperator.ExpressionEvaluator { + private final Source source; + + private final BreakingBytesRefBuilder scratch; + + private final Hash.HashFunction algorithm; + + private final EvalOperator.ExpressionEvaluator input; + + private final DriverContext driverContext; + + private Warnings warnings; + + public HashConstantEvaluator(Source source, BreakingBytesRefBuilder scratch, + Hash.HashFunction algorithm, EvalOperator.ExpressionEvaluator input, + DriverContext driverContext) { + this.source = source; + this.scratch = scratch; + this.algorithm = algorithm; + this.input = input; + this.driverContext = driverContext; + } + + @Override + public Block eval(Page page) { + try (BytesRefBlock inputBlock = (BytesRefBlock) input.eval(page)) { + BytesRefVector inputVector = inputBlock.asVector(); + if (inputVector == null) { + return eval(page.getPositionCount(), inputBlock); + } + return eval(page.getPositionCount(), inputVector).asBlock(); + } + } + + public BytesRefBlock eval(int positionCount, BytesRefBlock inputBlock) { + try(BytesRefBlock.Builder result = driverContext.blockFactory().newBytesRefBlockBuilder(positionCount)) { + BytesRef inputScratch = new BytesRef(); + position: for (int p = 0; p < positionCount; p++) { + if (inputBlock.isNull(p)) { + result.appendNull(); + continue position; + } + if (inputBlock.getValueCount(p) != 1) { + if (inputBlock.getValueCount(p) > 1) { + warnings().registerException(new IllegalArgumentException("single-value function encountered multi-value")); + } + result.appendNull(); + continue position; + } + result.appendBytesRef(Hash.processConstant(this.scratch, this.algorithm, inputBlock.getBytesRef(inputBlock.getFirstValueIndex(p), inputScratch))); + } + return result.build(); + } + } + + public BytesRefVector eval(int positionCount, BytesRefVector inputVector) { + try(BytesRefVector.Builder result = driverContext.blockFactory().newBytesRefVectorBuilder(positionCount)) { + BytesRef inputScratch = new BytesRef(); + position: for (int p = 0; p < positionCount; p++) { + result.appendBytesRef(Hash.processConstant(this.scratch, this.algorithm, inputVector.getBytesRef(p, inputScratch))); + } + return result.build(); + } + } + + @Override + public String toString() { + return "HashConstantEvaluator[" + "algorithm=" + algorithm + ", input=" + input + "]"; + } + + @Override + public void close() { + Releasables.closeExpectNoException(scratch, input); + } + + private Warnings warnings() { + if (warnings == null) { + this.warnings = Warnings.createWarnings( + driverContext.warningsMode(), + source.source().getLineNumber(), + source.source().getColumnNumber(), + source.text() + ); + } + return warnings; + } + + static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final Source source; + + private final Function scratch; + + private final Function algorithm; + + private final EvalOperator.ExpressionEvaluator.Factory input; + + public Factory(Source source, Function scratch, + Function algorithm, + EvalOperator.ExpressionEvaluator.Factory input) { + this.source = source; + this.scratch = scratch; + this.algorithm = algorithm; + this.input = input; + } + + @Override + public HashConstantEvaluator get(DriverContext context) { + return new HashConstantEvaluator(source, scratch.apply(context), algorithm.apply(context), input.get(context), context); + } + + @Override + public String toString() { + return "HashConstantEvaluator[" + "algorithm=" + algorithm + ", input=" + input + "]"; + } + 
} +} diff --git a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/HashEvaluator.java b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/HashEvaluator.java new file mode 100644 index 0000000000000..8b01cc0330142 --- /dev/null +++ b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/HashEvaluator.java @@ -0,0 +1,174 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.xpack.esql.expression.function.scalar.string; + +import java.lang.IllegalArgumentException; +import java.lang.Override; +import java.lang.String; +import java.security.NoSuchAlgorithmException; +import java.util.function.Function; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.BytesRefVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.BreakingBytesRefBuilder; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.compute.operator.Warnings; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.xpack.esql.core.tree.Source; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link Hash}. + * This class is generated. Do not edit it. + */ +public final class HashEvaluator implements EvalOperator.ExpressionEvaluator { + private final Source source; + + private final BreakingBytesRefBuilder scratch; + + private final EvalOperator.ExpressionEvaluator algorithm; + + private final EvalOperator.ExpressionEvaluator input; + + private final DriverContext driverContext; + + private Warnings warnings; + + public HashEvaluator(Source source, BreakingBytesRefBuilder scratch, + EvalOperator.ExpressionEvaluator algorithm, EvalOperator.ExpressionEvaluator input, + DriverContext driverContext) { + this.source = source; + this.scratch = scratch; + this.algorithm = algorithm; + this.input = input; + this.driverContext = driverContext; + } + + @Override + public Block eval(Page page) { + try (BytesRefBlock algorithmBlock = (BytesRefBlock) algorithm.eval(page)) { + try (BytesRefBlock inputBlock = (BytesRefBlock) input.eval(page)) { + BytesRefVector algorithmVector = algorithmBlock.asVector(); + if (algorithmVector == null) { + return eval(page.getPositionCount(), algorithmBlock, inputBlock); + } + BytesRefVector inputVector = inputBlock.asVector(); + if (inputVector == null) { + return eval(page.getPositionCount(), algorithmBlock, inputBlock); + } + return eval(page.getPositionCount(), algorithmVector, inputVector); + } + } + } + + public BytesRefBlock eval(int positionCount, BytesRefBlock algorithmBlock, + BytesRefBlock inputBlock) { + try(BytesRefBlock.Builder result = driverContext.blockFactory().newBytesRefBlockBuilder(positionCount)) { + BytesRef algorithmScratch = new BytesRef(); + BytesRef inputScratch = new BytesRef(); + position: for (int p = 0; p < positionCount; p++) { + if (algorithmBlock.isNull(p)) { + result.appendNull(); + continue position; + } + if (algorithmBlock.getValueCount(p) != 1) { + if (algorithmBlock.getValueCount(p) > 1) { + warnings().registerException(new 
IllegalArgumentException("single-value function encountered multi-value")); + } + result.appendNull(); + continue position; + } + if (inputBlock.isNull(p)) { + result.appendNull(); + continue position; + } + if (inputBlock.getValueCount(p) != 1) { + if (inputBlock.getValueCount(p) > 1) { + warnings().registerException(new IllegalArgumentException("single-value function encountered multi-value")); + } + result.appendNull(); + continue position; + } + try { + result.appendBytesRef(Hash.process(this.scratch, algorithmBlock.getBytesRef(algorithmBlock.getFirstValueIndex(p), algorithmScratch), inputBlock.getBytesRef(inputBlock.getFirstValueIndex(p), inputScratch))); + } catch (NoSuchAlgorithmException e) { + warnings().registerException(e); + result.appendNull(); + } + } + return result.build(); + } + } + + public BytesRefBlock eval(int positionCount, BytesRefVector algorithmVector, + BytesRefVector inputVector) { + try(BytesRefBlock.Builder result = driverContext.blockFactory().newBytesRefBlockBuilder(positionCount)) { + BytesRef algorithmScratch = new BytesRef(); + BytesRef inputScratch = new BytesRef(); + position: for (int p = 0; p < positionCount; p++) { + try { + result.appendBytesRef(Hash.process(this.scratch, algorithmVector.getBytesRef(p, algorithmScratch), inputVector.getBytesRef(p, inputScratch))); + } catch (NoSuchAlgorithmException e) { + warnings().registerException(e); + result.appendNull(); + } + } + return result.build(); + } + } + + @Override + public String toString() { + return "HashEvaluator[" + "algorithm=" + algorithm + ", input=" + input + "]"; + } + + @Override + public void close() { + Releasables.closeExpectNoException(scratch, algorithm, input); + } + + private Warnings warnings() { + if (warnings == null) { + this.warnings = Warnings.createWarnings( + driverContext.warningsMode(), + source.source().getLineNumber(), + source.source().getColumnNumber(), + source.text() + ); + } + return warnings; + } + + static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final Source source; + + private final Function scratch; + + private final EvalOperator.ExpressionEvaluator.Factory algorithm; + + private final EvalOperator.ExpressionEvaluator.Factory input; + + public Factory(Source source, Function scratch, + EvalOperator.ExpressionEvaluator.Factory algorithm, + EvalOperator.ExpressionEvaluator.Factory input) { + this.source = source; + this.scratch = scratch; + this.algorithm = algorithm; + this.input = input; + } + + @Override + public HashEvaluator get(DriverContext context) { + return new HashEvaluator(source, scratch.apply(context), algorithm.get(context), input.get(context), context); + } + + @Override + public String toString() { + return "HashEvaluator[" + "algorithm=" + algorithm + ", input=" + input + "]"; + } + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java index 235d0dcbe4164..d6c1539088d47 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java @@ -352,7 +352,10 @@ public enum Cap { * Support for mixed comparisons between nanosecond and millisecond dates */ DATE_NANOS_COMPARE_TO_MILLIS(), - + /** + * Support implicit casting of strings to date nanos + */ + DATE_NANOS_IMPLICIT_CASTING(), /** * Support Least and Greatest functions on Date 
Nanos type */ @@ -446,6 +449,11 @@ public enum Cap { */ KQL_FUNCTION(Build.current().isSnapshot()), + /** + * Hash function + */ + HASH_FUNCTION, + /** * Don't optimize CASE IS NOT NULL function by not requiring the fields to be not null as well. * https://github.com/elastic/elasticsearch/issues/112704 @@ -547,7 +555,7 @@ public enum Cap { /** * LOOKUP JOIN */ - JOIN_LOOKUP_V6(Build.current().isSnapshot()), + JOIN_LOOKUP_V7(Build.current().isSnapshot()), /** * Fix for https://github.com/elastic/elasticsearch/issues/117054 @@ -582,7 +590,12 @@ public enum Cap { /** * Fix for regex folding with case-insensitive pattern https://github.com/elastic/elasticsearch/issues/118371 */ - FIXED_REGEX_FOLD; + FIXED_REGEX_FOLD, + + /** + * Full text functions can be used in disjunctions + */ + FULL_TEXT_FUNCTIONS_DISJUNCTIONS; private final boolean enabled; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java index d59745f03f608..e15731ca79038 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java @@ -118,6 +118,7 @@ import static org.elasticsearch.xpack.core.enrich.EnrichPolicy.GEO_MATCH_TYPE; import static org.elasticsearch.xpack.esql.core.type.DataType.BOOLEAN; import static org.elasticsearch.xpack.esql.core.type.DataType.DATETIME; +import static org.elasticsearch.xpack.esql.core.type.DataType.DATE_NANOS; import static org.elasticsearch.xpack.esql.core.type.DataType.DATE_PERIOD; import static org.elasticsearch.xpack.esql.core.type.DataType.DOUBLE; import static org.elasticsearch.xpack.esql.core.type.DataType.FLOAT; @@ -1050,21 +1051,23 @@ private BitSet gatherPreAnalysisMetrics(LogicalPlan plan, BitSet b) { /** * Cast string literals in ScalarFunction, EsqlArithmeticOperation, BinaryComparison, In and GroupingFunction to desired data types. * For example, the string literals in the following expressions will be cast implicitly to the field data type on the left hand side. - * date > "2024-08-21" - * date in ("2024-08-21", "2024-08-22", "2024-08-23") - * date = "2024-08-21" + 3 days - * ip == "127.0.0.1" - * version != "1.0" - * bucket(dateField, "1 month") - * date_trunc("1 minute", dateField) - * + *
<ul>
+ * <li>date > "2024-08-21"</li>
+ * <li>date in ("2024-08-21", "2024-08-22", "2024-08-23")</li>
+ * <li>date = "2024-08-21" + 3 days</li>
+ * <li>ip == "127.0.0.1"</li>
+ * <li>version != "1.0"</li>
+ * <li>bucket(dateField, "1 month")</li>
+ * <li>date_trunc("1 minute", dateField)</li>
+ * </ul>
 * If the inputs to Coalesce are mixed numeric types, cast the rest of the numeric field or value to the first numeric data type if
 * applicable. For example, implicit casting converts:
- * Coalesce(Long, Int) to Coalesce(Long, Long)
- * Coalesce(null, Long, Int) to Coalesce(null, Long, Long)
- * Coalesce(Double, Long, Int) to Coalesce(Double, Double, Double)
- * Coalesce(null, Double, Long, Int) to Coalesce(null, Double, Double, Double)
- *
+ * <ul>
+ * <li>Coalesce(Long, Int) to Coalesce(Long, Long)</li>
+ * <li>Coalesce(null, Long, Int) to Coalesce(null, Long, Long)</li>
+ * <li>Coalesce(Double, Long, Int) to Coalesce(Double, Double, Double)</li>
+ * <li>Coalesce(null, Double, Long, Int) to Coalesce(null, Double, Double, Double)</li>
+ * </ul>
 * Coalesce(Int, Long) will NOT be converted to Coalesce(Long, Long) or Coalesce(Int, Int). */ private static class ImplicitCasting extends ParameterizedRule { @@ -1245,7 +1248,7 @@ private static boolean supportsImplicitTemporalCasting(Expression e, BinaryOpera } private static boolean supportsStringImplicitCasting(DataType type) { - return type == DATETIME || type == IP || type == VERSION || type == BOOLEAN; + return type == DATETIME || type == DATE_NANOS || type == IP || type == VERSION || type == BOOLEAN; } private static UnresolvedAttribute unresolvedAttribute(Expression value, String type, Exception e) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java index f01cc265e330b..6b98b7d69834f 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java @@ -766,41 +766,78 @@ private static void checkRemoteEnrich(LogicalPlan plan, Set failures) { } /** - * Checks whether a condition contains a disjunction with the specified typeToken. Adds to failure if it does. + * Checks whether a condition contains a disjunction with a full text search. + * If it does, check that every element of the disjunction is a full text search or a combination (AND, OR, NOT) of them. + * If not, add a failure to the failures collection. * - * @param condition condition to check for disjunctions + * @param condition condition to check for disjunctions of full text searches * @param typeNameProvider provider for the type name to add in the failure message * @param failures failures collection to add to */ - private static void checkNotPresentInDisjunctions( + private static void checkFullTextSearchDisjunctions( Expression condition, java.util.function.Function typeNameProvider, Set failures ) { - condition.forEachUp(Or.class, or -> { - checkNotPresentInDisjunctions(or.left(), or, typeNameProvider, failures); - checkNotPresentInDisjunctions(or.right(), or, typeNameProvider, failures); + int failuresCount = failures.size(); + condition.forEachDown(Or.class, or -> { + if (failures.size() > failuresCount) { + // Exit early if we already have a failure + return; + } + boolean hasFullText = or.anyMatch(FullTextFunction.class::isInstance); + if (hasFullText) { + boolean hasOnlyFullText = onlyFullTextFunctionsInExpression(or); + if (hasOnlyFullText == false) { + failures.add( + fail( + or, + "Invalid condition [{}]. Full text functions can be used in an OR condition, " + + "but only if just full text functions are used in the OR condition", + or.sourceText() + ) + ); + } + } }); } /** - * Checks whether a condition contains a disjunction with the specified typeToken. Adds to failure if it does. 
+ * Checks whether an expression contains just full text functions or negations (NOT) and combinations (AND, OR) of full text functions * - * @param parentExpression parent expression to add to the failure message - * @param or disjunction that is being checked - * @param failures failures collection to add to + * @param expression expression to check + * @return true if all children are full text functions or negations of full text functions, false otherwise */ - private static void checkNotPresentInDisjunctions( - Expression parentExpression, - Or or, - java.util.function.Function elementName, - Set failures - ) { - parentExpression.forEachDown(FullTextFunction.class, ftp -> { - failures.add( - fail(or, "Invalid condition [{}]. {} can't be used as part of an or condition", or.sourceText(), elementName.apply(ftp)) - ); - }); + private static boolean onlyFullTextFunctionsInExpression(Expression expression) { + if (expression instanceof FullTextFunction) { + return true; + } else if (expression instanceof Not) { + return onlyFullTextFunctionsInExpression(expression.children().get(0)); + } else if (expression instanceof BinaryLogic binaryLogic) { + return onlyFullTextFunctionsInExpression(binaryLogic.left()) && onlyFullTextFunctionsInExpression(binaryLogic.right()); + } + + return false; + } + + /** + * Checks whether an expression contains a full text function as part of it + * + * @param expression expression to check + * @return true if the expression or any of its children is a full text function, false otherwise + */ + private static boolean anyFullTextFunctionsInExpression(Expression expression) { + if (expression instanceof FullTextFunction) { + return true; + } + + for (Expression child : expression.children()) { + if (anyFullTextFunctionsInExpression(child)) { + return true; + } + } + + return false; } /** @@ -870,7 +907,7 @@ private static void checkFullTextQueryFunctions(LogicalPlan plan, Set f m -> "[" + m.functionName() + "] " + m.functionType(), failures ); - checkNotPresentInDisjunctions(condition, ftf -> "[" + ftf.functionName() + "] " + ftf.functionType(), failures); + checkFullTextSearchDisjunctions(condition, ftf -> "[" + ftf.functionName() + "] " + ftf.functionType(), failures); checkFullTextFunctionsParents(condition, failures); } else { plan.forEachExpression(FullTextFunction.class, ftf -> { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java index a59ef5bb1575d..908c9c5f197a8 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java @@ -129,6 +129,7 @@ import org.elasticsearch.xpack.esql.expression.function.scalar.string.ByteLength; import org.elasticsearch.xpack.esql.expression.function.scalar.string.Concat; import org.elasticsearch.xpack.esql.expression.function.scalar.string.EndsWith; +import org.elasticsearch.xpack.esql.expression.function.scalar.string.Hash; import org.elasticsearch.xpack.esql.expression.function.scalar.string.LTrim; import org.elasticsearch.xpack.esql.expression.function.scalar.string.Left; import org.elasticsearch.xpack.esql.expression.function.scalar.string.Length; @@ -327,6 +328,7 @@ private static FunctionDefinition[][] functions() { def(ByteLength.class, ByteLength::new, "byte_length"), 
def(Concat.class, Concat::new, "concat"), def(EndsWith.class, EndsWith::new, "ends_with"), + def(Hash.class, Hash::new, "hash"), def(LTrim.class, LTrim::new, "ltrim"), def(Left.class, Left::new, "left"), def(Length.class, Length::new, "length"), diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/ScalarFunctionWritables.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/ScalarFunctionWritables.java index 192ca6c43e57d..3cf0eef9074ad 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/ScalarFunctionWritables.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/ScalarFunctionWritables.java @@ -34,6 +34,7 @@ import org.elasticsearch.xpack.esql.expression.function.scalar.string.BitLength; import org.elasticsearch.xpack.esql.expression.function.scalar.string.Concat; import org.elasticsearch.xpack.esql.expression.function.scalar.string.EndsWith; +import org.elasticsearch.xpack.esql.expression.function.scalar.string.Hash; import org.elasticsearch.xpack.esql.expression.function.scalar.string.Left; import org.elasticsearch.xpack.esql.expression.function.scalar.string.Locate; import org.elasticsearch.xpack.esql.expression.function.scalar.string.Repeat; @@ -64,6 +65,7 @@ public static List getNamedWriteables() { entries.add(E.ENTRY); entries.add(EndsWith.ENTRY); entries.add(Greatest.ENTRY); + entries.add(Hash.ENTRY); entries.add(Hypot.ENTRY); entries.add(In.ENTRY); entries.add(InsensitiveEquals.ENTRY); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/FromBase64.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/FromBase64.java index 7f9d0d3f2e647..832c511a2dc50 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/FromBase64.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/FromBase64.java @@ -30,6 +30,7 @@ import java.util.Base64; import java.util.List; +import static org.elasticsearch.compute.ann.Fixed.Scope.THREAD_LOCAL; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.isString; import static org.elasticsearch.xpack.esql.core.type.DataType.KEYWORD; @@ -85,7 +86,7 @@ protected NodeInfo info() { } @Evaluator() - static BytesRef process(BytesRef field, @Fixed(includeInToString = false, build = true) BytesRefBuilder oScratch) { + static BytesRef process(BytesRef field, @Fixed(includeInToString = false, scope = THREAD_LOCAL) BytesRefBuilder oScratch) { byte[] bytes = new byte[field.length]; System.arraycopy(field.bytes, field.offset, bytes, 0, field.length); oScratch.grow(field.length); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToBase64.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToBase64.java index c23cef31f32f5..e78968bb209b6 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToBase64.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToBase64.java @@ -30,6 +30,7 @@ import java.util.Base64; import java.util.List; +import static org.elasticsearch.compute.ann.Fixed.Scope.THREAD_LOCAL; import static 
org.elasticsearch.xpack.esql.core.expression.TypeResolutions.isString; import static org.elasticsearch.xpack.esql.core.type.DataType.KEYWORD; @@ -78,7 +79,7 @@ protected NodeInfo info() { } @Evaluator(warnExceptions = { ArithmeticException.class }) - static BytesRef process(BytesRef field, @Fixed(includeInToString = false, build = true) BytesRefBuilder oScratch) { + static BytesRef process(BytesRef field, @Fixed(includeInToString = false, scope = THREAD_LOCAL) BytesRefBuilder oScratch) { int outLength = Math.multiplyExact(4, (Math.addExact(field.length, 2) / 3)); byte[] bytes = new byte[field.length]; System.arraycopy(field.bytes, field.offset, bytes, 0, field.length); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/ip/IpPrefix.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/ip/IpPrefix.java index 26e75e752f681..5fc61c5c07b58 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/ip/IpPrefix.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/ip/IpPrefix.java @@ -30,6 +30,7 @@ import java.util.Arrays; import java.util.List; +import static org.elasticsearch.compute.ann.Fixed.Scope.THREAD_LOCAL; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.FIRST; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.SECOND; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.THIRD; @@ -138,7 +139,7 @@ static BytesRef process( BytesRef ip, int prefixLengthV4, int prefixLengthV6, - @Fixed(includeInToString = false, build = true) BytesRef scratch + @Fixed(includeInToString = false, scope = THREAD_LOCAL) BytesRef scratch ) { if (prefixLengthV4 < 0 || prefixLengthV4 > 32) { throw new IllegalArgumentException("Prefix length v4 must be in range [0, 32], found " + prefixLengthV4); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPSeriesWeightedSum.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPSeriesWeightedSum.java index cf49607893aae..4dd447f938880 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPSeriesWeightedSum.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPSeriesWeightedSum.java @@ -33,6 +33,7 @@ import java.util.Arrays; import java.util.List; +import static org.elasticsearch.compute.ann.Fixed.Scope.THREAD_LOCAL; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.FIRST; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.SECOND; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.isFoldable; @@ -144,7 +145,7 @@ static void process( DoubleBlock.Builder builder, int position, DoubleBlock block, - @Fixed(includeInToString = false, build = true) CompensatedSum sum, + @Fixed(includeInToString = false, scope = THREAD_LOCAL) CompensatedSum sum, @Fixed double p ) { sum.reset(0, 0); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPercentile.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPercentile.java index f3a63c835bd34..4e4aee307f1c7 
100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPercentile.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPercentile.java @@ -35,6 +35,7 @@ import java.util.Arrays; import java.util.List; +import static org.elasticsearch.compute.ann.Fixed.Scope.THREAD_LOCAL; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.FIRST; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.SECOND; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.isType; @@ -167,7 +168,7 @@ static void process( int position, DoubleBlock values, double percentile, - @Fixed(includeInToString = false, build = true) DoubleSortingScratch scratch + @Fixed(includeInToString = false, scope = THREAD_LOCAL) DoubleSortingScratch scratch ) { int valueCount = values.getValueCount(position); int firstValueIndex = values.getFirstValueIndex(position); @@ -190,7 +191,7 @@ static void process( int position, IntBlock values, double percentile, - @Fixed(includeInToString = false, build = true) IntSortingScratch scratch + @Fixed(includeInToString = false, scope = THREAD_LOCAL) IntSortingScratch scratch ) { int valueCount = values.getValueCount(position); int firstValueIndex = values.getFirstValueIndex(position); @@ -213,7 +214,7 @@ static void process( int position, LongBlock values, double percentile, - @Fixed(includeInToString = false, build = true) LongSortingScratch scratch + @Fixed(includeInToString = false, scope = THREAD_LOCAL) LongSortingScratch scratch ) { int valueCount = values.getValueCount(position); int firstValueIndex = values.getFirstValueIndex(position); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Concat.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Concat.java index 46ecc9e026d3d..eb173029876d3 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Concat.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Concat.java @@ -32,6 +32,7 @@ import java.util.stream.Stream; import static org.elasticsearch.common.unit.ByteSizeUnit.MB; +import static org.elasticsearch.compute.ann.Fixed.Scope.THREAD_LOCAL; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.DEFAULT; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.isString; @@ -111,7 +112,7 @@ public ExpressionEvaluator.Factory toEvaluator(ToEvaluator toEvaluator) { } @Evaluator - static BytesRef process(@Fixed(includeInToString = false, build = true) BreakingBytesRefBuilder scratch, BytesRef[] values) { + static BytesRef process(@Fixed(includeInToString = false, scope = THREAD_LOCAL) BreakingBytesRefBuilder scratch, BytesRef[] values) { scratch.grow(checkedTotalLength(values)); scratch.clear(); for (int i = 0; i < values.length; i++) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Hash.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Hash.java new file mode 100644 index 0000000000000..99c5908699ec2 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Hash.java @@ -0,0 +1,217 @@ +/* + * Copyright 
Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.string; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.compute.ann.Evaluator; +import org.elasticsearch.compute.ann.Fixed; +import org.elasticsearch.compute.operator.BreakingBytesRefBuilder; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.xpack.esql.core.InvalidArgumentException; +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.tree.NodeInfo; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; +import org.elasticsearch.xpack.esql.expression.function.Param; +import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; + +import java.io.IOException; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; +import java.util.List; +import java.util.function.Function; + +import static org.elasticsearch.compute.ann.Fixed.Scope.THREAD_LOCAL; +import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.FIRST; +import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.SECOND; +import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.isString; + +public class Hash extends EsqlScalarFunction { + + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "Hash", Hash::new); + + private final Expression algorithm; + private final Expression input; + + @FunctionInfo( + returnType = "keyword", + description = "Computes the hash of the input using various algorithms such as MD5, SHA, SHA-224, SHA-256, SHA-384, SHA-512." 
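+ // Illustrative example (not from the original diff): hash("SHA-256", "hello") evaluates to
+ // "2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824", the lowercase hex
+ // encoding of the SHA-256 digest of the UTF-8 bytes of "hello".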
+ ) + public Hash( + Source source, + @Param(name = "algorithm", type = { "keyword", "text" }, description = "Hash algorithm to use.") Expression algorithm, + @Param(name = "input", type = { "keyword", "text" }, description = "Input to hash.") Expression input + ) { + super(source, List.of(algorithm, input)); + this.algorithm = algorithm; + this.input = input; + } + + private Hash(StreamInput in) throws IOException { + this(Source.readFrom((PlanStreamInput) in), in.readNamedWriteable(Expression.class), in.readNamedWriteable(Expression.class)); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + source().writeTo(out); + out.writeNamedWriteable(algorithm); + out.writeNamedWriteable(input); + } + + @Override + public String getWriteableName() { + return ENTRY.name; + } + + @Override + public DataType dataType() { + return DataType.KEYWORD; + } + + @Override + protected TypeResolution resolveType() { + if (childrenResolved() == false) { + return new TypeResolution("Unresolved children"); + } + + TypeResolution resolution = isString(algorithm, sourceText(), FIRST); + if (resolution.unresolved()) { + return resolution; + } + + return isString(input, sourceText(), SECOND); + } + + @Override + public boolean foldable() { + return algorithm.foldable() && input.foldable(); + } + + @Evaluator(warnExceptions = NoSuchAlgorithmException.class) + static BytesRef process( + @Fixed(includeInToString = false, scope = THREAD_LOCAL) BreakingBytesRefBuilder scratch, + BytesRef algorithm, + BytesRef input + ) throws NoSuchAlgorithmException { + return hash(scratch, MessageDigest.getInstance(algorithm.utf8ToString()), input); + } + + @Evaluator(extraName = "Constant") + static BytesRef processConstant( + @Fixed(includeInToString = false, scope = THREAD_LOCAL) BreakingBytesRefBuilder scratch, + @Fixed(scope = THREAD_LOCAL) HashFunction algorithm, + BytesRef input + ) { + return hash(scratch, algorithm.digest(), input); + } + + private static BytesRef hash(BreakingBytesRefBuilder scratch, MessageDigest algorithm, BytesRef input) { + algorithm.reset(); + algorithm.update(input.bytes, input.offset, input.length); + var digest = algorithm.digest(); + scratch.clear(); + scratch.grow(digest.length * 2); + appendUtf8HexDigest(scratch, digest); + return scratch.bytesRefView(); + } + + private static final byte[] ASCII_HEX_BYTES = new byte[] { 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 97, 98, 99, 100, 101, 102 }; + + /** + * This function appends hex bytes directly to the {@link BreakingBytesRefBuilder}, + * bypassing unnecessary array allocations and byte array copying. 
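+ * For example, a digest byte of 0x2f appends the ASCII bytes '2' and 'f': 0x2f >> 4 & 0xf
+ * selects ASCII_HEX_BYTES[2] (50, '2') and 0x2f & 0xf selects ASCII_HEX_BYTES[15] (102, 'f').
+ * The mask also keeps negative bytes correct: for (byte) 0xa0, b >> 4 & 0xf is 0xa.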
+ */ + private static void appendUtf8HexDigest(BreakingBytesRefBuilder scratch, byte[] bytes) { + for (byte b : bytes) { + scratch.append(ASCII_HEX_BYTES[b >> 4 & 0xf]); + scratch.append(ASCII_HEX_BYTES[b & 0xf]); + } + } + + @Override + public EvalOperator.ExpressionEvaluator.Factory toEvaluator(ToEvaluator toEvaluator) { + if (algorithm.foldable()) { + try { + // hash function is created here in order to validate the algorithm is valid before evaluator is created + var hf = HashFunction.create((BytesRef) algorithm.fold()); + return new HashConstantEvaluator.Factory( + source(), + context -> new BreakingBytesRefBuilder(context.breaker(), "hash"), + new Function<>() { + @Override + public HashFunction apply(DriverContext context) { + return hf.copy(); + } + + @Override + public String toString() { + return hf.toString(); + } + }, + toEvaluator.apply(input) + ); + } catch (NoSuchAlgorithmException e) { + throw new InvalidArgumentException(e, "invalid algorithm for [{}]: {}", sourceText(), e.getMessage()); + } + } else { + return new HashEvaluator.Factory( + source(), + context -> new BreakingBytesRefBuilder(context.breaker(), "hash"), + toEvaluator.apply(algorithm), + toEvaluator.apply(input) + ); + } + } + + @Override + public Expression replaceChildren(List newChildren) { + return new Hash(source(), newChildren.get(0), newChildren.get(1)); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, Hash::new, children().get(0), children().get(1)); + } + + public record HashFunction(String algorithm, MessageDigest digest) { + + public static HashFunction create(BytesRef literal) throws NoSuchAlgorithmException { + var algorithm = literal.utf8ToString(); + var digest = MessageDigest.getInstance(algorithm); + return new HashFunction(algorithm, digest); + } + + public HashFunction copy() { + try { + return new HashFunction(algorithm, MessageDigest.getInstance(algorithm)); + } catch (NoSuchAlgorithmException e) { + assert false : "Algorithm should be valid at this point"; + throw new IllegalStateException(e); + } + } + + @Override + public String toString() { + return algorithm; + } + } + + Expression algorithm() { + return algorithm; + } + + Expression input() { + return input; + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Left.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Left.java index e7572caafd8f5..0d885e3f3c341 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Left.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Left.java @@ -30,6 +30,7 @@ import java.util.Arrays; import java.util.List; +import static org.elasticsearch.compute.ann.Fixed.Scope.THREAD_LOCAL; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.FIRST; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.SECOND; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.isString; @@ -77,8 +78,8 @@ public String getWriteableName() { @Evaluator static BytesRef process( - @Fixed(includeInToString = false, build = true) BytesRef out, - @Fixed(includeInToString = false, build = true) UnicodeUtil.UTF8CodePoint cp, + @Fixed(includeInToString = false, scope = THREAD_LOCAL) BytesRef out, + @Fixed(includeInToString = false, scope = THREAD_LOCAL) UnicodeUtil.UTF8CodePoint cp, BytesRef str, 
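+ // Note: scope = THREAD_LOCAL (replacing build = true) means each driver thread gets its own
+ // scratch instance from the factory, keeping the evaluator thread-safe without sharing
+ // mutable state across threads.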
int length ) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Repeat.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Repeat.java index 2cc14399df2ae..e91f03de3dd7e 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Repeat.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Repeat.java @@ -31,6 +31,7 @@ import java.util.List; import static org.elasticsearch.common.unit.ByteSizeUnit.MB; +import static org.elasticsearch.compute.ann.Fixed.Scope.THREAD_LOCAL; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.FIRST; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.SECOND; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.isString; @@ -101,7 +102,7 @@ public boolean foldable() { @Evaluator(extraName = "Constant", warnExceptions = { IllegalArgumentException.class }) static BytesRef processConstantNumber( - @Fixed(includeInToString = false, build = true) BreakingBytesRefBuilder scratch, + @Fixed(includeInToString = false, scope = THREAD_LOCAL) BreakingBytesRefBuilder scratch, BytesRef str, @Fixed int number ) { @@ -109,7 +110,11 @@ static BytesRef processConstantNumber( } @Evaluator(warnExceptions = { IllegalArgumentException.class }) - static BytesRef process(@Fixed(includeInToString = false, build = true) BreakingBytesRefBuilder scratch, BytesRef str, int number) { + static BytesRef process( + @Fixed(includeInToString = false, scope = THREAD_LOCAL) BreakingBytesRefBuilder scratch, + BytesRef str, + int number + ) { if (number < 0) { throw new IllegalArgumentException("Number parameter cannot be negative, found [" + number + "]"); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Right.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Right.java index b069b984ea81e..e0ebed29cca72 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Right.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Right.java @@ -30,6 +30,7 @@ import java.util.Arrays; import java.util.List; +import static org.elasticsearch.compute.ann.Fixed.Scope.THREAD_LOCAL; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.FIRST; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.SECOND; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.isString; @@ -77,8 +78,8 @@ public String getWriteableName() { @Evaluator static BytesRef process( - @Fixed(includeInToString = false, build = true) BytesRef out, - @Fixed(includeInToString = false, build = true) UnicodeUtil.UTF8CodePoint cp, + @Fixed(includeInToString = false, scope = THREAD_LOCAL) BytesRef out, + @Fixed(includeInToString = false, scope = THREAD_LOCAL) UnicodeUtil.UTF8CodePoint cp, BytesRef str, int length ) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Space.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Space.java index 6481ce5764e1f..3b9a466966911 100644 --- 
a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Space.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Space.java @@ -31,6 +31,7 @@ import java.util.List; import static org.elasticsearch.common.unit.ByteSizeUnit.MB; +import static org.elasticsearch.compute.ann.Fixed.Scope.THREAD_LOCAL; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.DEFAULT; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.isType; import static org.elasticsearch.xpack.esql.core.type.DataType.KEYWORD; @@ -82,7 +83,7 @@ protected TypeResolution resolveType() { } @Evaluator(warnExceptions = { IllegalArgumentException.class }) - static BytesRef process(@Fixed(includeInToString = false, build = true) BreakingBytesRefBuilder scratch, int number) { + static BytesRef process(@Fixed(includeInToString = false, scope = THREAD_LOCAL) BreakingBytesRefBuilder scratch, int number) { checkNumber(number); scratch.grow(number); scratch.setLength(number); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Split.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Split.java index b1f5da56d011b..24762122f755b 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Split.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Split.java @@ -29,6 +29,7 @@ import java.io.IOException; +import static org.elasticsearch.compute.ann.Fixed.Scope.THREAD_LOCAL; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.FIRST; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.SECOND; import static org.elasticsearch.xpack.esql.expression.EsqlTypeResolutions.isStringAndExact; @@ -110,7 +111,7 @@ static void process( BytesRefBlock.Builder builder, BytesRef str, @Fixed byte delim, - @Fixed(includeInToString = false, build = true) BytesRef scratch + @Fixed(includeInToString = false, scope = THREAD_LOCAL) BytesRef scratch ) { scratch.bytes = str.bytes; scratch.offset = str.offset; @@ -140,7 +141,7 @@ static void process( BytesRefBlock.Builder builder, BytesRef str, BytesRef delim, - @Fixed(includeInToString = false, build = true) BytesRef scratch + @Fixed(includeInToString = false, scope = THREAD_LOCAL) BytesRef scratch ) { checkDelimiter(delim); process(builder, str, delim.bytes[delim.offset], scratch); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/local/ReplaceMissingFieldWithNull.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/local/ReplaceMissingFieldWithNull.java index 096f72f7694e1..f9d86ecf0f61a 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/local/ReplaceMissingFieldWithNull.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/local/ReplaceMissingFieldWithNull.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.esql.optimizer.rules.logical.local; import org.elasticsearch.common.util.Maps; +import org.elasticsearch.index.IndexMode; import org.elasticsearch.xpack.esql.core.expression.Alias; import org.elasticsearch.xpack.esql.core.expression.AttributeSet; import 
org.elasticsearch.xpack.esql.core.expression.FieldAttribute; @@ -41,10 +42,17 @@ public class ReplaceMissingFieldWithNull extends ParameterizedRule<LogicalPlan, LogicalPlan, LocalLogicalOptimizerContext> { @Override public LogicalPlan apply(LogicalPlan plan, LocalLogicalOptimizerContext localLogicalOptimizerContext) { - return plan.transformUp(p -> missingToNull(p, localLogicalOptimizerContext.searchStats())); + AttributeSet lookupFields = new AttributeSet(); + plan.forEachUp(EsRelation.class, esRelation -> { + if (esRelation.indexMode() == IndexMode.LOOKUP) { + lookupFields.addAll(esRelation.output()); + } + }); + + return plan.transformUp(p -> missingToNull(p, localLogicalOptimizerContext.searchStats(), lookupFields)); } - private LogicalPlan missingToNull(LogicalPlan plan, SearchStats stats) { + private LogicalPlan missingToNull(LogicalPlan plan, SearchStats stats, AttributeSet lookupFields) { if (plan instanceof EsRelation || plan instanceof LocalRelation) { return plan; } @@ -95,7 +103,8 @@ else if (plan instanceof Project project) { plan = plan.transformExpressionsOnlyUp( FieldAttribute.class, // Do not use the attribute name, this can deviate from the field name for union types. - f -> stats.exists(f.fieldName()) ? f : Literal.of(f, null) + // Also skip fields from lookup indices because we do not have stats for these. + f -> stats.exists(f.fieldName()) || lookupFields.contains(f) ? f : Literal.of(f, null) ); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverter.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverter.java index 6ba2d8451f956..0847f71b1fb01 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverter.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverter.java @@ -13,6 +13,8 @@ import org.elasticsearch.common.logging.LoggerMessageFormat; import org.elasticsearch.common.lucene.BytesRefs; import org.elasticsearch.common.time.DateFormatter; +import org.elasticsearch.common.time.DateFormatters; +import org.elasticsearch.common.time.DateUtils; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.xpack.esql.core.InvalidArgumentException; import org.elasticsearch.xpack.esql.core.QlIllegalArgumentException; @@ -51,7 +53,6 @@ import java.time.Period; import java.time.ZoneId; import java.time.temporal.ChronoField; -import java.time.temporal.TemporalAccessor; import java.time.temporal.TemporalAmount; import java.util.List; import java.util.Locale; @@ -200,6 +201,9 @@ public static Converter converterFor(DataType from, DataType to) { if (to == DataType.DATETIME) { return EsqlConverter.STRING_TO_DATETIME; } + if (to == DATE_NANOS) { + return EsqlConverter.STRING_TO_DATE_NANOS; + } if (to == DataType.IP) { return EsqlConverter.STRING_TO_IP; } @@ -514,13 +518,12 @@ public static long dateTimeToLong(String dateTime, DateFormatter formatter) { } public static long dateNanosToLong(String dateNano) { - return dateNanosToLong(dateNano, DateFormatter.forPattern("strict_date_optional_time_nanos")); + return dateNanosToLong(dateNano, DEFAULT_DATE_NANOS_FORMATTER); } public static long dateNanosToLong(String dateNano, DateFormatter formatter) { - TemporalAccessor parsed = formatter.parse(dateNano); - long nanos = parsed.getLong(ChronoField.INSTANT_SECONDS) * 1_000_000_000 + parsed.getLong(ChronoField.NANO_OF_SECOND); - return nanos; + Instant parsed = DateFormatters.from(formatter.parse(dateNano)).toInstant(); + return DateUtils.toLong(parsed); } public static String dateTimeToString(long dateTime) { @@ -639,6 +642,7 @@ public enum EsqlConverter implements Converter { STRING_TO_TIME_DURATION(x -> 
EsqlDataTypeConverter.parseTemporalAmount(x, DataType.TIME_DURATION)), STRING_TO_CHRONO_FIELD(EsqlDataTypeConverter::stringToChrono), STRING_TO_DATETIME(x -> EsqlDataTypeConverter.dateTimeToLong((String) x)), + STRING_TO_DATE_NANOS(x -> EsqlDataTypeConverter.dateNanosToLong((String) x)), STRING_TO_IP(x -> EsqlDataTypeConverter.stringToIP((String) x)), STRING_TO_VERSION(x -> EsqlDataTypeConverter.stringToVersion((String) x)), STRING_TO_DOUBLE(x -> EsqlDataTypeConverter.stringToDouble((String) x)), diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java index f553c15ef69fa..717ac7b5a62a7 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java @@ -263,7 +263,7 @@ public final void test() throws Throwable { ); assumeFalse( "lookup join disabled for csv tests", - testCase.requiredCapabilities.contains(EsqlCapabilities.Cap.JOIN_LOOKUP_V6.capabilityName()) + testCase.requiredCapabilities.contains(EsqlCapabilities.Cap.JOIN_LOOKUP_V7.capabilityName()) ); assumeFalse( "can't use TERM function in csv tests", diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java index 35364089127cc..2deedb927331d 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java @@ -204,7 +204,7 @@ private Page randomPage(List columns) { case BOOLEAN -> ((BooleanBlock.Builder) builder).appendBoolean(randomBoolean()); case UNSUPPORTED -> ((BytesRefBlock.Builder) builder).appendNull(); // TODO - add a random instant thing here? 
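The DATE_NANOS change just below, and the matching test fix further down in EsqlDataTypeConverterTests, rest on the same invariant: date_nanos is stored as nanoseconds since the epoch in a signed long, so only non-negative values (1970-01-01 up to roughly 2262-04-11) are representable, which is why random test values must come from randomNonNegativeLong(). A minimal, self-contained sketch of that representation; the class name and sample value are illustrative, while DateUtils.toLong is the real helper the converter now delegates to:

```java
import java.time.Instant;

public class DateNanosRoundTrip {
    // Same idea as Elasticsearch's DateUtils.toLong(Instant): pack an instant
    // into nanoseconds since the epoch. A production version must also reject
    // instants outside the representable range instead of silently overflowing.
    static long toLongNanos(Instant instant) {
        return instant.getEpochSecond() * 1_000_000_000L + instant.getNano();
    }

    public static void main(String[] args) {
        Instant parsed = Instant.parse("2023-01-01T00:00:00.123456789Z");
        System.out.println(toLongNanos(parsed)); // prints 1672531200123456789
    }
}
```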
- case DATE_NANOS -> ((LongBlock.Builder) builder).appendLong(randomLong()); + case DATE_NANOS -> ((LongBlock.Builder) builder).appendLong(randomNonNegativeLong()); case VERSION -> ((BytesRefBlock.Builder) builder).appendBytesRef(new Version(randomIdentifier()).toBytesRef()); case GEO_POINT -> ((BytesRefBlock.Builder) builder).appendBytesRef(GEO.asWkb(GeometryTestUtils.randomPoint())); case CARTESIAN_POINT -> ((BytesRefBlock.Builder) builder).appendBytesRef(CARTESIAN.asWkb(ShapeTestUtils.randomPoint())); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java index 4e02119b31744..9c71f20dcde0e 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java @@ -2139,7 +2139,7 @@ public void testLookupMatchTypeWrong() { } public void testLookupJoinUnknownIndex() { - assumeTrue("requires LOOKUP JOIN capability", EsqlCapabilities.Cap.JOIN_LOOKUP_V6.isEnabled()); + assumeTrue("requires LOOKUP JOIN capability", EsqlCapabilities.Cap.JOIN_LOOKUP_V7.isEnabled()); String errorMessage = "Unknown index [foobar]"; IndexResolution missingLookupIndex = IndexResolution.invalid(errorMessage); @@ -2168,7 +2168,7 @@ public void testLookupJoinUnknownIndex() { } public void testLookupJoinUnknownField() { - assumeTrue("requires LOOKUP JOIN capability", EsqlCapabilities.Cap.JOIN_LOOKUP_V6.isEnabled()); + assumeTrue("requires LOOKUP JOIN capability", EsqlCapabilities.Cap.JOIN_LOOKUP_V7.isEnabled()); String query = "FROM test | LOOKUP JOIN languages_lookup ON last_name"; String errorMessage = "1:45: Unknown column [last_name] in right side of join"; diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java index 58180aafedc0b..a1e29117a25d3 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java @@ -1166,12 +1166,14 @@ public void testMatchInsideEval() throws Exception { public void testMatchFilter() throws Exception { assertEquals( "1:19: Invalid condition [first_name:\"Anna\" or starts_with(first_name, \"Anne\")]. " - + "[:] operator can't be used as part of an or condition", + + "Full text functions can be used in an OR condition, " + + "but only if just full text functions are used in the OR condition", error("from test | where first_name:\"Anna\" or starts_with(first_name, \"Anne\")") ); assertEquals( - "1:51: Invalid condition [first_name:\"Anna\" OR new_salary > 100]. " + "[:] operator can't be used as part of an or condition", + "1:51: Invalid condition [first_name:\"Anna\" OR new_salary > 100]. 
Full text functions can be" + + " used in an OR condition, but only if just full text functions are used in the OR condition", error("from test | eval new_salary = salary + 10 | where first_name:\"Anna\" OR new_salary > 100") ); } @@ -1409,48 +1411,56 @@ public void testMatchOperatorWithDisjunctions() { } private void checkWithDisjunctions(String functionName, String functionInvocation, String functionType) { + String expression = functionInvocation + " or length(first_name) > 12"; + checkDisjunctionError("1:19", expression, functionName, functionType); + expression = "(" + functionInvocation + " or first_name is not null) or (length(first_name) > 12 and match(last_name, \"Smith\"))"; + checkDisjunctionError("1:19", expression, functionName, functionType); + expression = functionInvocation + " or (last_name is not null and first_name is null)"; + checkDisjunctionError("1:19", expression, functionName, functionType); + } + + private void checkDisjunctionError(String position, String expression, String functionName, String functionType) { assertEquals( LoggerMessageFormat.format( null, - "1:19: Invalid condition [{} or length(first_name) > 12]. " - + "[{}] " - + functionType - + " can't be used as part of an or condition", - functionInvocation, - functionName - ), - error("from test | where " + functionInvocation + " or length(first_name) > 12") - ); - assertEquals( - LoggerMessageFormat.format( - null, - "1:19: Invalid condition [({} and first_name is not null) or (length(first_name) > 12 and first_name is null)]. " - + "[{}] " - + functionType - + " can't be used as part of an or condition", - functionInvocation, - functionName - ), - error( - "from test | where (" - + functionInvocation - + " and first_name is not null) or (length(first_name) > 12 and first_name is null)" - ) - ); - assertEquals( - LoggerMessageFormat.format( - null, - "1:19: Invalid condition [({} and first_name is not null) or first_name is null]. " - + "[{}] " - + functionType - + " can't be used as part of an or condition", - functionInvocation, - functionName + "{}: Invalid condition [{}]. 
Full text functions can be used in an OR condition, " + + "but only if just full text functions are used in the OR condition", + position, + expression ), - error("from test | where (" + functionInvocation + " and first_name is not null) or first_name is null") + error("from test | where " + expression) ); } + public void testFullTextFunctionsDisjunctions() { + checkWithFullTextFunctionsDisjunctions("MATCH", "match(last_name, \"Smith\")", "function"); + checkWithFullTextFunctionsDisjunctions(":", "last_name : \"Smith\"", "operator"); + checkWithFullTextFunctionsDisjunctions("QSTR", "qstr(\"last_name: Smith\")", "function"); + + assumeTrue("KQL function capability not available", EsqlCapabilities.Cap.KQL_FUNCTION.isEnabled()); + checkWithFullTextFunctionsDisjunctions("KQL", "kql(\"last_name: Smith\")", "function"); + } + + private void checkWithFullTextFunctionsDisjunctions(String functionName, String functionInvocation, String functionType) { + + String expression = functionInvocation + " or length(first_name) > 10"; + checkDisjunctionError("1:19", expression, functionName, functionType); + + expression = "match(last_name, \"Anneke\") or (" + functionInvocation + " and length(first_name) > 10)"; + checkDisjunctionError("1:19", expression, functionName, functionType); + + expression = "(" + + functionInvocation + + " and length(first_name) > 0) or (match(last_name, \"Anneke\") and length(first_name) > 10)"; + checkDisjunctionError("1:19", expression, functionName, functionType); + + query("from test | where " + functionInvocation + " or match(first_name, \"Anna\")"); + query("from test | where " + functionInvocation + " or not match(first_name, \"Anna\")"); + query("from test | where (" + functionInvocation + " or match(first_name, \"Anna\")) and length(first_name) > 10"); + query("from test | where (" + functionInvocation + " or match(first_name, \"Anna\")) and match(last_name, \"Smith\")"); + query("from test | where " + functionInvocation + " or (match(first_name, \"Anna\") and match(last_name, \"Smith\"))"); + } + public void testQueryStringFunctionWithNonBooleanFunctions() { checkFullTextFunctionsWithNonBooleanFunctions("QSTR", "qstr(\"first_name: Anna\")", "function"); } @@ -1964,7 +1974,7 @@ public void testSortByAggregate() { } public void testLookupJoinDataTypeMismatch() { - assumeTrue("requires LOOKUP JOIN capability", EsqlCapabilities.Cap.JOIN_LOOKUP_V6.isEnabled()); + assumeTrue("requires LOOKUP JOIN capability", EsqlCapabilities.Cap.JOIN_LOOKUP_V7.isEnabled()); query("FROM test | EVAL language_code = languages | LOOKUP JOIN languages_lookup ON language_code"); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/AbstractExpressionSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/AbstractExpressionSerializationTests.java index 6dd0c5fe88afd..050293e58c19d 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/AbstractExpressionSerializationTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/AbstractExpressionSerializationTests.java @@ -14,10 +14,15 @@ import org.elasticsearch.xpack.esql.plan.AbstractNodeSerializationTests; public abstract class AbstractExpressionSerializationTests extends AbstractNodeSerializationTests { + public static Expression randomChild() { return ReferenceAttributeTests.randomReferenceAttribute(false); } + public static Expression mutateExpression(Expression expression) { + return 
randomValueOtherThan(expression, AbstractExpressionSerializationTests::randomChild); + } + @Override protected final NamedWriteableRegistry getNamedWriteableRegistry() { return new NamedWriteableRegistry(ExpressionWritables.getNamedWriteables()); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/HashSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/HashSerializationTests.java new file mode 100644 index 0000000000000..f21105c2c8bca --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/HashSerializationTests.java @@ -0,0 +1,27 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.string; + +import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests; + +import java.io.IOException; + +public class HashSerializationTests extends AbstractExpressionSerializationTests { + + @Override + protected Hash createTestInstance() { + return new Hash(randomSource(), randomChild(), randomChild()); + } + + @Override + protected Hash mutateInstance(Hash instance) throws IOException { + return randomBoolean() + ? new Hash(instance.source(), mutateExpression(instance.algorithm()), instance.input()) + : new Hash(instance.source(), instance.algorithm(), mutateExpression(instance.input())); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/HashStaticTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/HashStaticTests.java new file mode 100644 index 0000000000000..871bec7c06804 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/HashStaticTests.java @@ -0,0 +1,66 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.string; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.breaker.CircuitBreaker; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.MockBigArrays; +import org.elasticsearch.common.util.PageCacheRecycler; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.esql.core.InvalidArgumentException; +import org.elasticsearch.xpack.esql.core.expression.Literal; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.core.type.DataType; +import org.junit.After; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +import static org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase.evaluator; +import static org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase.field; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.startsWith; + +public class HashStaticTests extends ESTestCase { + + public void testInvalidAlgorithmLiteral() { + Source source = new Source(0, 0, "hash(\"invalid\", input)"); + DriverContext driverContext = driverContext(); + InvalidArgumentException e = expectThrows( + InvalidArgumentException.class, + () -> evaluator( + new Hash(source, new Literal(source, new BytesRef("invalid"), DataType.KEYWORD), field("input", DataType.KEYWORD)) + ).get(driverContext) + ); + assertThat(e.getMessage(), startsWith("invalid algorithm for [hash(\"invalid\", input)]: invalid MessageDigest not available")); + } + + /** + * The following fields and methods were borrowed from AbstractScalarFunctionTestCase + */ + private final List<CircuitBreaker> breakers = Collections.synchronizedList(new ArrayList<>()); + + private DriverContext driverContext() { + BigArrays bigArrays = new MockBigArrays(PageCacheRecycler.NON_RECYCLING_INSTANCE, ByteSizeValue.ofMb(256)).withCircuitBreaking(); + CircuitBreaker breaker = bigArrays.breakerService().getBreaker(CircuitBreaker.REQUEST); + breakers.add(breaker); + return new DriverContext(bigArrays, new BlockFactory(breaker, bigArrays)); + } + + @After + public void allMemoryReleased() { + for (CircuitBreaker breaker : breakers) { + assertThat(breaker.getUsed(), equalTo(0L)); + } + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/HashTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/HashTests.java new file mode 100644 index 0000000000000..c5cdf97eccd17 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/HashTests.java @@ -0,0 +1,107 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0.
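As a companion to the Hash test fixtures around this point, here is a minimal standalone sketch of the behavior they exercise: digest the UTF-8 input with a JDK MessageDigest, then hex-encode the result byte by byte, high nibble first, mirroring appendUtf8HexDigest in Hash.java. The class name and sample value are illustrative only:

```java
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;

public class HashSketch {
    // Each byte becomes two lowercase hex digits: (b >> 4 & 0xf) then (b & 0xf),
    // the same nibble order the evaluator writes into its scratch buffer.
    static String hash(String algorithm, String input) throws NoSuchAlgorithmException {
        byte[] digest = MessageDigest.getInstance(algorithm).digest(input.getBytes(StandardCharsets.UTF_8));
        StringBuilder sb = new StringBuilder(digest.length * 2);
        for (byte b : digest) {
            sb.append(Character.forDigit(b >> 4 & 0xf, 16));
            sb.append(Character.forDigit(b & 0xf, 16));
        }
        return sb.toString();
    }

    public static void main(String[] args) throws NoSuchAlgorithmException {
        System.out.println(hash("MD5", "hello")); // 5d41402abc4b2a76b9719d911017c592
    }
}
```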
+ */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.string; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.lucene.BytesRefs; +import org.elasticsearch.xpack.esql.core.InvalidArgumentException; +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; + +import java.nio.charset.StandardCharsets; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; +import java.util.ArrayList; +import java.util.HexFormat; +import java.util.List; +import java.util.function.Supplier; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; + +public class HashTests extends AbstractScalarFunctionTestCase { + + public HashTests(@Name("TestCase") Supplier testCaseSupplier) { + this.testCase = testCaseSupplier.get(); + } + + @ParametersFactory + public static Iterable parameters() { + List cases = new ArrayList<>(); + for (String algorithm : List.of("MD5", "SHA", "SHA-224", "SHA-256", "SHA-384", "SHA-512")) { + cases.addAll(createTestCases(algorithm)); + } + cases.add(new TestCaseSupplier("Invalid algorithm", List.of(DataType.KEYWORD, DataType.KEYWORD), () -> { + var input = randomAlphaOfLength(10); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(new BytesRef("invalid"), DataType.KEYWORD, "algorithm"), + new TestCaseSupplier.TypedData(new BytesRef(input), DataType.KEYWORD, "input") + ), + "HashEvaluator[algorithm=Attribute[channel=0], input=Attribute[channel=1]]", + DataType.KEYWORD, + is(nullValue()) + ).withWarning("Line -1:-1: evaluation of [] failed, treating result as null. Only first 20 failures recorded.") + .withWarning("Line -1:-1: java.security.NoSuchAlgorithmException: invalid MessageDigest not available") + .withFoldingException(InvalidArgumentException.class, "invalid algorithm for []: invalid MessageDigest not available"); + })); + return parameterSuppliersFromTypedDataWithDefaultChecks(true, cases, (v, p) -> "string"); + } + + private static List createTestCases(String algorithm) { + return List.of( + createTestCase(algorithm, false, DataType.KEYWORD, DataType.KEYWORD), + createTestCase(algorithm, false, DataType.KEYWORD, DataType.TEXT), + createTestCase(algorithm, false, DataType.TEXT, DataType.KEYWORD), + createTestCase(algorithm, false, DataType.TEXT, DataType.TEXT), + createTestCase(algorithm, true, DataType.KEYWORD, DataType.KEYWORD), + createTestCase(algorithm, true, DataType.KEYWORD, DataType.TEXT), + createTestCase(algorithm, true, DataType.TEXT, DataType.KEYWORD), + createTestCase(algorithm, true, DataType.TEXT, DataType.TEXT) + ); + } + + private static TestCaseSupplier createTestCase(String algorithm, boolean forceLiteral, DataType algorithmType, DataType inputType) { + return new TestCaseSupplier(algorithm, List.of(algorithmType, inputType), () -> { + var input = randomFrom(TestCaseSupplier.stringCases(inputType)).get(); + return new TestCaseSupplier.TestCase( + List.of(createTypedData(algorithm, forceLiteral, algorithmType, "algorithm"), input), + forceLiteral + ? 
"HashConstantEvaluator[algorithm=" + algorithm + ", input=Attribute[channel=0]]" + : "HashEvaluator[algorithm=Attribute[channel=0], input=Attribute[channel=1]]", + DataType.KEYWORD, + equalTo(new BytesRef(hash(algorithm, BytesRefs.toString(input.data())))) + ); + }); + } + + private static TestCaseSupplier.TypedData createTypedData(String value, boolean forceLiteral, DataType type, String name) { + var data = new TestCaseSupplier.TypedData(new BytesRef(value), type, name); + return forceLiteral ? data.forceLiteral() : data; + } + + private static String hash(String algorithm, String input) { + try { + return HexFormat.of().formatHex(MessageDigest.getInstance(algorithm).digest(input.getBytes(StandardCharsets.UTF_8))); + } catch (NoSuchAlgorithmException e) { + throw new IllegalArgumentException("Unknown algorithm: " + algorithm); + } + } + + @Override + protected Expression build(Source source, List args) { + return new Hash(source, args.get(0), args.get(1)); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizerTests.java index 879a413615202..406e27c1517e5 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizerTests.java @@ -21,6 +21,7 @@ import org.elasticsearch.index.query.MatchQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.index.query.QueryStringQueryBuilder; import org.elasticsearch.index.query.RangeQueryBuilder; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.license.XPackLicenseState; @@ -57,6 +58,7 @@ import org.elasticsearch.xpack.esql.plan.physical.EvalExec; import org.elasticsearch.xpack.esql.plan.physical.ExchangeExec; import org.elasticsearch.xpack.esql.plan.physical.FieldExtractExec; +import org.elasticsearch.xpack.esql.plan.physical.FilterExec; import org.elasticsearch.xpack.esql.plan.physical.LimitExec; import org.elasticsearch.xpack.esql.plan.physical.LocalSourceExec; import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; @@ -1543,6 +1545,46 @@ public void testMultipleMatchFilterPushdown() { assertThat(actualLuceneQuery.toString(), is(expectedLuceneQuery.toString())); } + public void testFullTextFunctionsDisjunctionPushdown() { + String query = """ + from test + | where (match(first_name, "Anna") or qstr("first_name: Anneke")) and last_name: "Smith" + | sort emp_no + """; + var plan = plannerOptimizer.plan(query); + var topNExec = as(plan, TopNExec.class); + var exchange = as(topNExec.child(), ExchangeExec.class); + var project = as(exchange.child(), ProjectExec.class); + var fieldExtract = as(project.child(), FieldExtractExec.class); + var actualLuceneQuery = as(fieldExtract.child(), EsQueryExec.class).query(); + var expectedLuceneQuery = new BoolQueryBuilder().must( + new BoolQueryBuilder().should(new MatchQueryBuilder("first_name", "Anna").lenient(true)) + .should(new QueryStringQueryBuilder("first_name: Anneke")) + ).must(new MatchQueryBuilder("last_name", "Smith").lenient(true)); + assertThat(actualLuceneQuery.toString(), is(expectedLuceneQuery.toString())); + } + + public void testFullTextFunctionsDisjunctionWithFiltersPushdown() { + String query = """ + from test + | where 
(first_name:"Anna" or first_name:"Anneke") and length(last_name) > 5 + | sort emp_no + """; + var plan = plannerOptimizer.plan(query); + var topNExec = as(plan, TopNExec.class); + var exchange = as(topNExec.child(), ExchangeExec.class); + var project = as(exchange.child(), ProjectExec.class); + var fieldExtract = as(project.child(), FieldExtractExec.class); + var secondTopNExec = as(fieldExtract.child(), TopNExec.class); + var secondFieldExtract = as(secondTopNExec.child(), FieldExtractExec.class); + var filterExec = as(secondFieldExtract.child(), FilterExec.class); + var thirdFilterExtract = as(filterExec.child(), FieldExtractExec.class); + var actualLuceneQuery = as(thirdFilterExtract.child(), EsQueryExec.class).query(); + var expectedLuceneQuery = new BoolQueryBuilder().should(new MatchQueryBuilder("first_name", "Anna").lenient(true)) + .should(new MatchQueryBuilder("first_name", "Anneke").lenient(true)); + assertThat(actualLuceneQuery.toString(), is(expectedLuceneQuery.toString())); + } + /** * Expecting * LimitExec[1000[INTEGER]] diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java index c4d7b30115c2d..cfb993a7dd73d 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java @@ -4906,7 +4906,7 @@ public void testPlanSanityCheck() throws Exception { } public void testPlanSanityCheckWithBinaryPlans() throws Exception { - assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V6.isEnabled()); + assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V7.isEnabled()); var plan = optimizedPlan(""" FROM test @@ -5911,7 +5911,7 @@ public void testLookupStats() { * \_EsRelation[languages_lookup][LOOKUP][language_code{f}#18, language_name{f}#19] */ public void testLookupJoinPushDownFilterOnJoinKeyWithRename() { - assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V6.isEnabled()); + assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V7.isEnabled()); String query = """ FROM test @@ -5954,7 +5954,7 @@ public void testLookupJoinPushDownFilterOnJoinKeyWithRename() { * \_EsRelation[languages_lookup][LOOKUP][language_code{f}#18, language_name{f}#19] */ public void testLookupJoinPushDownFilterOnLeftSideField() { - assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V6.isEnabled()); + assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V7.isEnabled()); String query = """ FROM test @@ -5998,7 +5998,7 @@ public void testLookupJoinPushDownFilterOnLeftSideField() { * \_EsRelation[languages_lookup][LOOKUP][language_code{f}#18, language_name{f}#19] */ public void testLookupJoinPushDownDisabledForLookupField() { - assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V6.isEnabled()); + assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V7.isEnabled()); String query = """ FROM test @@ -6043,7 +6043,7 @@ public void testLookupJoinPushDownDisabledForLookupField() { * \_EsRelation[languages_lookup][LOOKUP][language_code{f}#19, language_name{f}#20] */ public void testLookupJoinPushDownSeparatedForConjunctionBetweenLeftAndRightField() { - assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V6.isEnabled()); + assumeTrue("Requires LOOKUP JOIN", 
EsqlCapabilities.Cap.JOIN_LOOKUP_V7.isEnabled()); String query = """ FROM test @@ -6096,7 +6096,7 @@ public void testLookupJoinPushDownSeparatedForConjunctionBetweenLeftAndRightFiel * \_EsRelation[languages_lookup][LOOKUP][language_code{f}#19, language_name{f}#20] */ public void testLookupJoinPushDownDisabledForDisjunctionBetweenLeftAndRightField() { - assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V6.isEnabled()); + assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V7.isEnabled()); String query = """ FROM test diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java index 9f6ef89008a24..964dd4642d7c2 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java @@ -2331,7 +2331,7 @@ public void testVerifierOnMissingReferences() { } public void testVerifierOnMissingReferencesWithBinaryPlans() throws Exception { - assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V6.isEnabled()); + assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V7.isEnabled()); // Do not assert serialization: // This will have a LookupJoinExec, which is not serializable because it doesn't leave the coordinator. diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/IndexResolverFieldNamesTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/IndexResolverFieldNamesTests.java index e4271a0a6ddd5..31ec4663738f7 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/IndexResolverFieldNamesTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/IndexResolverFieldNamesTests.java @@ -9,6 +9,7 @@ import org.elasticsearch.Build; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.esql.action.EsqlCapabilities; import org.elasticsearch.xpack.esql.parser.EsqlParser; import org.elasticsearch.xpack.esql.parser.ParsingException; @@ -1364,6 +1365,7 @@ public void testMetrics() { } public void testLookupJoin() { + assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V7.isEnabled()); assertFieldNames( "FROM employees | KEEP languages | RENAME languages AS language_code | LOOKUP JOIN languages_lookup ON language_code", Set.of("languages", "languages.*", "language_code", "language_code.*"), @@ -1372,6 +1374,7 @@ public void testLookupJoin() { } public void testLookupJoinKeep() { + assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V7.isEnabled()); assertFieldNames( """ FROM employees @@ -1385,6 +1388,7 @@ public void testLookupJoinKeep() { } public void testLookupJoinKeepWildcard() { + assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V7.isEnabled()); assertFieldNames( """ FROM employees @@ -1398,6 +1402,7 @@ public void testLookupJoinKeepWildcard() { } public void testMultiLookupJoin() { + assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V7.isEnabled()); assertFieldNames( """ FROM sample_data @@ -1410,6 +1415,7 @@ public void testMultiLookupJoin() { } public void testMultiLookupJoinKeepBefore() { + assumeTrue("LOOKUP JOIN available as snapshot only", 
EsqlCapabilities.Cap.JOIN_LOOKUP_V7.isEnabled()); assertFieldNames( """ FROM sample_data @@ -1423,6 +1429,7 @@ public void testMultiLookupJoinKeepBefore() { } public void testMultiLookupJoinKeepBetween() { + assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V7.isEnabled()); assertFieldNames( """ FROM sample_data @@ -1447,6 +1454,7 @@ public void testMultiLookupJoinKeepBetween() { } public void testMultiLookupJoinKeepAfter() { + assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V7.isEnabled()); assertFieldNames( """ FROM sample_data @@ -1473,6 +1481,7 @@ public void testMultiLookupJoinKeepAfter() { } public void testMultiLookupJoinKeepAfterWildcard() { + assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V7.isEnabled()); assertFieldNames( """ FROM sample_data @@ -1486,6 +1495,7 @@ public void testMultiLookupJoinKeepAfterWildcard() { } public void testMultiLookupJoinSameIndex() { + assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V7.isEnabled()); assertFieldNames( """ FROM sample_data @@ -1499,6 +1509,7 @@ public void testMultiLookupJoinSameIndex() { } public void testMultiLookupJoinSameIndexKeepBefore() { + assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V7.isEnabled()); assertFieldNames( """ FROM sample_data @@ -1513,6 +1524,7 @@ public void testMultiLookupJoinSameIndexKeepBefore() { } public void testMultiLookupJoinSameIndexKeepBetween() { + assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V7.isEnabled()); assertFieldNames( """ FROM sample_data @@ -1538,6 +1550,7 @@ public void testMultiLookupJoinSameIndexKeepBetween() { } public void testMultiLookupJoinSameIndexKeepAfter() { + assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V7.isEnabled()); assertFieldNames( """ FROM sample_data diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverterTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverterTests.java index 8a57dfa968ccd..9a30c2281d742 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverterTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverterTests.java @@ -7,9 +7,11 @@ package org.elasticsearch.xpack.esql.type; +import org.elasticsearch.common.time.DateUtils; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.esql.core.type.DataType; +import java.time.Instant; import java.util.Arrays; import java.util.List; @@ -50,11 +52,19 @@ public class EsqlDataTypeConverterTests extends ESTestCase { public void testNanoTimeToString() { - long expected = randomLong(); + long expected = randomNonNegativeLong(); long actual = EsqlDataTypeConverter.dateNanosToLong(EsqlDataTypeConverter.nanoTimeToString(expected)); assertEquals(expected, actual); } + public void testStringToDateNanos() { + assertEquals( + DateUtils.toLong(Instant.parse("2023-01-01T00:00:00.000Z")), + EsqlDataTypeConverter.convert("2023-01-01T00:00:00.000000000", DATE_NANOS) + ); + assertEquals(DateUtils.toLong(Instant.parse("2023-01-01T00:00:00.000Z")), EsqlDataTypeConverter.convert("2023-01-01", DATE_NANOS)); + } + public void testCommonTypeNull() { for (DataType dataType : DataType.values()) { assertEqualsCommonType(dataType, NULL, dataType); diff --git 
a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceFeatures.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceFeatures.java index 3b7613b8b0e1f..876ff01812064 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceFeatures.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceFeatures.java @@ -10,13 +10,15 @@ import org.elasticsearch.features.FeatureSpecification; import org.elasticsearch.features.NodeFeature; import org.elasticsearch.xpack.inference.mapper.SemanticTextFieldMapper; -import org.elasticsearch.xpack.inference.queries.SemanticMatchQueryRewriteInterceptor; import org.elasticsearch.xpack.inference.queries.SemanticQueryBuilder; import org.elasticsearch.xpack.inference.rank.random.RandomRankRetrieverBuilder; import org.elasticsearch.xpack.inference.rank.textsimilarity.TextSimilarityRankRetrieverBuilder; import java.util.Set; +import static org.elasticsearch.xpack.inference.queries.SemanticMatchQueryRewriteInterceptor.SEMANTIC_MATCH_QUERY_REWRITE_INTERCEPTION_SUPPORTED; +import static org.elasticsearch.xpack.inference.queries.SemanticSparseVectorQueryRewriteInterceptor.SEMANTIC_SPARSE_VECTOR_QUERY_REWRITE_INTERCEPTION_SUPPORTED; + /** * Provides inference features. */ @@ -45,7 +47,8 @@ public Set<NodeFeature> getTestFeatures() { SemanticTextFieldMapper.SEMANTIC_TEXT_ZERO_SIZE_FIX, SemanticTextFieldMapper.SEMANTIC_TEXT_ALWAYS_EMIT_INFERENCE_ID_FIX, SEMANTIC_TEXT_HIGHLIGHTER, - SemanticMatchQueryRewriteInterceptor.SEMANTIC_MATCH_QUERY_REWRITE_INTERCEPTION_SUPPORTED + SEMANTIC_MATCH_QUERY_REWRITE_INTERCEPTION_SUPPORTED, + SEMANTIC_SPARSE_VECTOR_QUERY_REWRITE_INTERCEPTION_SUPPORTED ); } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java index 93743a5485c2c..169c8f87043e8 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java @@ -80,6 +80,7 @@ import org.elasticsearch.xpack.inference.mapper.SemanticTextFieldMapper; import org.elasticsearch.xpack.inference.queries.SemanticMatchQueryRewriteInterceptor; import org.elasticsearch.xpack.inference.queries.SemanticQueryBuilder; +import org.elasticsearch.xpack.inference.queries.SemanticSparseVectorQueryRewriteInterceptor; import org.elasticsearch.xpack.inference.rank.random.RandomRankBuilder; import org.elasticsearch.xpack.inference.rank.random.RandomRankRetrieverBuilder; import org.elasticsearch.xpack.inference.rank.textsimilarity.TextSimilarityRankBuilder; @@ -440,7 +441,7 @@ public List<QuerySpec<?>> getQueries() { @Override public List<QueryRewriteInterceptor> getQueryRewriteInterceptors() { - return List.of(new SemanticMatchQueryRewriteInterceptor()); + return List.of(new SemanticMatchQueryRewriteInterceptor(), new SemanticSparseVectorQueryRewriteInterceptor()); } @Override diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/queries/SemanticMatchQueryRewriteInterceptor.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/queries/SemanticMatchQueryRewriteInterceptor.java index a4a8123935c3e..fd1d65d00faf5 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/queries/SemanticMatchQueryRewriteInterceptor.java +++ 
b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/queries/SemanticMatchQueryRewriteInterceptor.java @@ -7,24 +7,12 @@ package org.elasticsearch.xpack.inference.queries; -import org.elasticsearch.action.ResolvedIndices; -import org.elasticsearch.cluster.metadata.IndexMetadata; -import org.elasticsearch.cluster.metadata.InferenceFieldMetadata; import org.elasticsearch.features.NodeFeature; -import org.elasticsearch.index.mapper.IndexFieldMapper; import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.MatchQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; -import org.elasticsearch.index.query.QueryRewriteContext; -import org.elasticsearch.index.query.TermQueryBuilder; -import org.elasticsearch.index.query.TermsQueryBuilder; -import org.elasticsearch.plugins.internal.rewriter.QueryRewriteInterceptor; -import java.util.ArrayList; -import java.util.Collection; -import java.util.List; - -public class SemanticMatchQueryRewriteInterceptor implements QueryRewriteInterceptor { +public class SemanticMatchQueryRewriteInterceptor extends SemanticQueryRewriteInterceptor { public static final NodeFeature SEMANTIC_MATCH_QUERY_REWRITE_INTERCEPTION_SUPPORTED = new NodeFeature( "search.semantic_match_query_rewrite_interception_supported" @@ -33,63 +21,45 @@ public class SemanticMatchQueryRewriteInterceptor implements QueryRewriteInterce public SemanticMatchQueryRewriteInterceptor() {} @Override - public QueryBuilder interceptAndRewrite(QueryRewriteContext context, QueryBuilder queryBuilder) { + protected String getFieldName(QueryBuilder queryBuilder) { assert (queryBuilder instanceof MatchQueryBuilder); MatchQueryBuilder matchQueryBuilder = (MatchQueryBuilder) queryBuilder; - QueryBuilder rewritten = queryBuilder; - ResolvedIndices resolvedIndices = context.getResolvedIndices(); - if (resolvedIndices != null) { - Collection indexMetadataCollection = resolvedIndices.getConcreteLocalIndicesMetadata().values(); - List inferenceIndices = new ArrayList<>(); - List nonInferenceIndices = new ArrayList<>(); - for (IndexMetadata indexMetadata : indexMetadataCollection) { - String indexName = indexMetadata.getIndex().getName(); - InferenceFieldMetadata inferenceFieldMetadata = indexMetadata.getInferenceFields().get(matchQueryBuilder.fieldName()); - if (inferenceFieldMetadata != null) { - inferenceIndices.add(indexName); - } else { - nonInferenceIndices.add(indexName); - } - } - - if (inferenceIndices.isEmpty()) { - return rewritten; - } else if (nonInferenceIndices.isEmpty() == false) { - BoolQueryBuilder boolQueryBuilder = new BoolQueryBuilder(); - for (String inferenceIndexName : inferenceIndices) { - // Add a separate clause for each semantic query, because they may be using different inference endpoints - // TODO - consolidate this to a single clause once the semantic query supports multiple inference endpoints - boolQueryBuilder.should( - createSemanticSubQuery(inferenceIndexName, matchQueryBuilder.fieldName(), (String) matchQueryBuilder.value()) - ); - } - boolQueryBuilder.should(createMatchSubQuery(nonInferenceIndices, matchQueryBuilder)); - rewritten = boolQueryBuilder; - } else { - rewritten = new SemanticQueryBuilder(matchQueryBuilder.fieldName(), (String) matchQueryBuilder.value(), false); - } - } - - return rewritten; + return matchQueryBuilder.fieldName(); + } + @Override + protected String getQuery(QueryBuilder queryBuilder) { + assert (queryBuilder instanceof MatchQueryBuilder); + MatchQueryBuilder matchQueryBuilder = 
(MatchQueryBuilder) queryBuilder; + return (String) matchQueryBuilder.value(); } @Override - public String getQueryName() { - return MatchQueryBuilder.NAME; + protected QueryBuilder buildInferenceQuery(QueryBuilder queryBuilder, InferenceIndexInformationForField indexInformation) { + return new SemanticQueryBuilder(indexInformation.fieldName(), getQuery(queryBuilder), false); } - private QueryBuilder createSemanticSubQuery(String indexName, String fieldName, String value) { + @Override + protected QueryBuilder buildCombinedInferenceAndNonInferenceQuery( + QueryBuilder queryBuilder, + InferenceIndexInformationForField indexInformation + ) { + assert (queryBuilder instanceof MatchQueryBuilder); + MatchQueryBuilder matchQueryBuilder = (MatchQueryBuilder) queryBuilder; BoolQueryBuilder boolQueryBuilder = new BoolQueryBuilder(); - boolQueryBuilder.must(new SemanticQueryBuilder(fieldName, value, true)); - boolQueryBuilder.filter(new TermQueryBuilder(IndexFieldMapper.NAME, indexName)); + boolQueryBuilder.should( + createSemanticSubQuery( + indexInformation.getInferenceIndices(), + matchQueryBuilder.fieldName(), + (String) matchQueryBuilder.value() + ) + ); + boolQueryBuilder.should(createSubQueryForIndices(indexInformation.nonInferenceIndices(), matchQueryBuilder)); return boolQueryBuilder; } - private QueryBuilder createMatchSubQuery(List indices, MatchQueryBuilder matchQueryBuilder) { - BoolQueryBuilder boolQueryBuilder = new BoolQueryBuilder(); - boolQueryBuilder.must(matchQueryBuilder); - boolQueryBuilder.filter(new TermsQueryBuilder(IndexFieldMapper.NAME, indices)); - return boolQueryBuilder; + @Override + public String getQueryName() { + return MatchQueryBuilder.NAME; } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/queries/SemanticQueryBuilder.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/queries/SemanticQueryBuilder.java index 2a34651efcd9d..dd0f6fe59ab23 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/queries/SemanticQueryBuilder.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/queries/SemanticQueryBuilder.java @@ -148,6 +148,14 @@ public String getWriteableName() { return NAME; } + public String getFieldName() { + return fieldName; + } + + public String getQuery() { + return query; + } + @Override public TransportVersion getMinimalSupportedVersion() { return TransportVersions.V_8_15_0; diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/queries/SemanticQueryRewriteInterceptor.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/queries/SemanticQueryRewriteInterceptor.java new file mode 100644 index 0000000000000..bb76ef0be24e9 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/queries/SemanticQueryRewriteInterceptor.java @@ -0,0 +1,148 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
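To make the match interceptor's overrides above concrete, here is a hypothetical sketch of the combined query assembled when the requested field is a semantic_text field in one index but a plain text field in another. The index names, field name, and query text are invented; the builder classes are the ones used in this diff, and each branch is pinned to its indices through a terms filter on _index:

```java
import org.elasticsearch.index.mapper.IndexFieldMapper;
import org.elasticsearch.index.query.BoolQueryBuilder;
import org.elasticsearch.index.query.MatchQueryBuilder;
import org.elasticsearch.index.query.TermsQueryBuilder;
import org.elasticsearch.xpack.inference.queries.SemanticQueryBuilder;

public class CombinedQueryShape {
    public static void main(String[] args) {
        BoolQueryBuilder combined = new BoolQueryBuilder()
            // semantic branch, routed to the index whose field is semantic_text
            .should(
                new BoolQueryBuilder().must(new SemanticQueryBuilder("body", "mountain hikes", true))
                    .filter(new TermsQueryBuilder(IndexFieldMapper.NAME, "semantic-idx"))
            )
            // lexical branch, routed to the remaining indices
            .should(
                new BoolQueryBuilder().must(new MatchQueryBuilder("body", "mountain hikes"))
                    .filter(new TermsQueryBuilder(IndexFieldMapper.NAME, "lexical-idx"))
            );
        System.out.println(combined); // renders the JSON form of the rewritten query
    }
}
```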
+ */ + +package org.elasticsearch.xpack.inference.queries; + +import org.elasticsearch.action.ResolvedIndices; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.InferenceFieldMetadata; +import org.elasticsearch.index.mapper.IndexFieldMapper; +import org.elasticsearch.index.query.BoolQueryBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryRewriteContext; +import org.elasticsearch.index.query.TermsQueryBuilder; +import org.elasticsearch.plugins.internal.rewriter.QueryRewriteInterceptor; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +/** + * Intercepts and adapts a query to be rewritten to work seamlessly on a semantic_text field. + */ +public abstract class SemanticQueryRewriteInterceptor implements QueryRewriteInterceptor { + + public SemanticQueryRewriteInterceptor() {} + + @Override + public QueryBuilder interceptAndRewrite(QueryRewriteContext context, QueryBuilder queryBuilder) { + String fieldName = getFieldName(queryBuilder); + ResolvedIndices resolvedIndices = context.getResolvedIndices(); + + if (resolvedIndices == null) { + // No resolved indices, so return the original query. + return queryBuilder; + } + + InferenceIndexInformationForField indexInformation = resolveIndicesForField(fieldName, resolvedIndices); + if (indexInformation.getInferenceIndices().isEmpty()) { + // No inference fields were identified, so return the original query. + return queryBuilder; + } else if (indexInformation.nonInferenceIndices().isEmpty() == false) { + // Combined case where the field name requested by this query contains both + // semantic_text and non-inference fields, so we have to combine queries per index + // containing each field type. + return buildCombinedInferenceAndNonInferenceQuery(queryBuilder, indexInformation); + } else { + // The only fields we've identified are inference fields (e.g. semantic_text), + // so rewrite the entire query to work on a semantic_text field. + return buildInferenceQuery(queryBuilder, indexInformation); + } + } + + /** + * @param queryBuilder {@link QueryBuilder} + * @return The singular field name requested by the provided query builder. + */ + protected abstract String getFieldName(QueryBuilder queryBuilder); + + /** + * @param queryBuilder {@link QueryBuilder} + * @return The text/query string requested by the provided query builder. + */ + protected abstract String getQuery(QueryBuilder queryBuilder); + + /** + * Builds the inference query + * + * @param queryBuilder {@link QueryBuilder} + * @param indexInformation {@link InferenceIndexInformationForField} + * @return {@link QueryBuilder} + */ + protected abstract QueryBuilder buildInferenceQuery(QueryBuilder queryBuilder, InferenceIndexInformationForField indexInformation); + + /** + * Builds a combined inference and non-inference query, + * which separates the different queries into appropriate indices based on field type. 
+ * @param queryBuilder {@link QueryBuilder} + * @param indexInformation {@link InferenceIndexInformationForField} + * @return {@link QueryBuilder} + */ + protected abstract QueryBuilder buildCombinedInferenceAndNonInferenceQuery( + QueryBuilder queryBuilder, + InferenceIndexInformationForField indexInformation + ); + + private InferenceIndexInformationForField resolveIndicesForField(String fieldName, ResolvedIndices resolvedIndices) { + Collection<IndexMetadata> indexMetadataCollection = resolvedIndices.getConcreteLocalIndicesMetadata().values(); + Map<String, InferenceFieldMetadata> inferenceIndicesMetadata = new HashMap<>(); + List<String> nonInferenceIndices = new ArrayList<>(); + for (IndexMetadata indexMetadata : indexMetadataCollection) { + String indexName = indexMetadata.getIndex().getName(); + InferenceFieldMetadata inferenceFieldMetadata = indexMetadata.getInferenceFields().get(fieldName); + if (inferenceFieldMetadata != null) { + inferenceIndicesMetadata.put(indexName, inferenceFieldMetadata); + } else { + nonInferenceIndices.add(indexName); + } + } + + return new InferenceIndexInformationForField(fieldName, inferenceIndicesMetadata, nonInferenceIndices); + } + + protected QueryBuilder createSubQueryForIndices(Collection<String> indices, QueryBuilder queryBuilder) { + BoolQueryBuilder boolQueryBuilder = new BoolQueryBuilder(); + boolQueryBuilder.must(queryBuilder); + boolQueryBuilder.filter(new TermsQueryBuilder(IndexFieldMapper.NAME, indices)); + return boolQueryBuilder; + } + + protected QueryBuilder createSemanticSubQuery(Collection<String> indices, String fieldName, String value) { + BoolQueryBuilder boolQueryBuilder = new BoolQueryBuilder(); + boolQueryBuilder.must(new SemanticQueryBuilder(fieldName, value, true)); + boolQueryBuilder.filter(new TermsQueryBuilder(IndexFieldMapper.NAME, indices)); + return boolQueryBuilder; + } + + /** + * Represents the indices and associated inference information for a field. + */ + public record InferenceIndexInformationForField( + String fieldName, + Map<String, InferenceFieldMetadata> inferenceIndicesMetadata, + List<String> nonInferenceIndices + ) { + + public Collection<String> getInferenceIndices() { + return inferenceIndicesMetadata.keySet(); + } + + public Map<String, List<String>> getInferenceIdsIndices() { + return inferenceIndicesMetadata.entrySet() + .stream() + .collect( + Collectors.groupingBy( + entry -> entry.getValue().getSearchInferenceId(), + Collectors.mapping(Map.Entry::getKey, Collectors.toList()) + ) + ); + } + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/queries/SemanticSparseVectorQueryRewriteInterceptor.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/queries/SemanticSparseVectorQueryRewriteInterceptor.java new file mode 100644 index 0000000000000..a35e83450c55a --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/queries/SemanticSparseVectorQueryRewriteInterceptor.java @@ -0,0 +1,124 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0.
+ */ + +package org.elasticsearch.xpack.inference.queries; + +import org.apache.lucene.search.join.ScoreMode; +import org.elasticsearch.features.NodeFeature; +import org.elasticsearch.index.query.BoolQueryBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.xpack.core.ml.search.SparseVectorQueryBuilder; +import org.elasticsearch.xpack.inference.mapper.SemanticTextField; + +import java.util.List; +import java.util.Map; + +public class SemanticSparseVectorQueryRewriteInterceptor extends SemanticQueryRewriteInterceptor { + + public static final NodeFeature SEMANTIC_SPARSE_VECTOR_QUERY_REWRITE_INTERCEPTION_SUPPORTED = new NodeFeature( + "search.semantic_sparse_vector_query_rewrite_interception_supported" + ); + + public SemanticSparseVectorQueryRewriteInterceptor() {} + + @Override + protected String getFieldName(QueryBuilder queryBuilder) { + assert (queryBuilder instanceof SparseVectorQueryBuilder); + SparseVectorQueryBuilder sparseVectorQueryBuilder = (SparseVectorQueryBuilder) queryBuilder; + return sparseVectorQueryBuilder.getFieldName(); + } + + @Override + protected String getQuery(QueryBuilder queryBuilder) { + assert (queryBuilder instanceof SparseVectorQueryBuilder); + SparseVectorQueryBuilder sparseVectorQueryBuilder = (SparseVectorQueryBuilder) queryBuilder; + return sparseVectorQueryBuilder.getQuery(); + } + + @Override + protected QueryBuilder buildInferenceQuery(QueryBuilder queryBuilder, InferenceIndexInformationForField indexInformation) { + Map<String, List<String>> inferenceIdsIndices = indexInformation.getInferenceIdsIndices(); + if (inferenceIdsIndices.size() == 1) { + // Simple case, everything uses the same inference ID + String searchInferenceId = inferenceIdsIndices.keySet().iterator().next(); + return buildNestedQueryFromSparseVectorQuery(queryBuilder, searchInferenceId); + } else { + // Multiple inference IDs, construct a boolean query + return buildInferenceQueryWithMultipleInferenceIds(queryBuilder, inferenceIdsIndices); + } + } + + private QueryBuilder buildInferenceQueryWithMultipleInferenceIds( + QueryBuilder queryBuilder, + Map<String, List<String>> inferenceIdsIndices + ) { + BoolQueryBuilder boolQueryBuilder = new BoolQueryBuilder(); + for (String inferenceId : inferenceIdsIndices.keySet()) { + boolQueryBuilder.should( + createSubQueryForIndices( + inferenceIdsIndices.get(inferenceId), + buildNestedQueryFromSparseVectorQuery(queryBuilder, inferenceId) + ) + ); + } + return boolQueryBuilder; + } + + @Override + protected QueryBuilder buildCombinedInferenceAndNonInferenceQuery( + QueryBuilder queryBuilder, + InferenceIndexInformationForField indexInformation + ) { + assert (queryBuilder instanceof SparseVectorQueryBuilder); + SparseVectorQueryBuilder sparseVectorQueryBuilder = (SparseVectorQueryBuilder) queryBuilder; + Map<String, List<String>> inferenceIdsIndices = indexInformation.getInferenceIdsIndices(); + + BoolQueryBuilder boolQueryBuilder = new BoolQueryBuilder(); + boolQueryBuilder.should( + createSubQueryForIndices( + indexInformation.nonInferenceIndices(), + createSubQueryForIndices(indexInformation.nonInferenceIndices(), sparseVectorQueryBuilder) + ) + ); + // We always perform nested subqueries on semantic_text fields, to support + // sparse_vector queries using query vectors.
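+ // Illustrative example (hypothetical inference IDs and index names): with + // inferenceIdsIndices = {"elser-1" -> ["index-a"], "elser-2" -> ["index-b"]}, the loop below + // adds one should clause per inference ID, each wrapping that ID's nested sparse_vector query + // in a filter on the _index field for the indices using that inference ID.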
+ for (String inferenceId : inferenceIdsIndices.keySet()) { + boolQueryBuilder.should( + createSubQueryForIndices( + inferenceIdsIndices.get(inferenceId), + buildNestedQueryFromSparseVectorQuery(sparseVectorQueryBuilder, inferenceId) + ) + ); + } + return boolQueryBuilder; + } + + private QueryBuilder buildNestedQueryFromSparseVectorQuery(QueryBuilder queryBuilder, String searchInferenceId) { + assert (queryBuilder instanceof SparseVectorQueryBuilder); + SparseVectorQueryBuilder sparseVectorQueryBuilder = (SparseVectorQueryBuilder) queryBuilder; + return QueryBuilders.nestedQuery( + SemanticTextField.getChunksFieldName(sparseVectorQueryBuilder.getFieldName()), + new SparseVectorQueryBuilder( + SemanticTextField.getEmbeddingsFieldName(sparseVectorQueryBuilder.getFieldName()), + sparseVectorQueryBuilder.getQueryVectors(), + (sparseVectorQueryBuilder.getInferenceId() == null && sparseVectorQueryBuilder.getQuery() != null) + ? searchInferenceId + : sparseVectorQueryBuilder.getInferenceId(), + sparseVectorQueryBuilder.getQuery(), + sparseVectorQueryBuilder.shouldPruneTokens(), + sparseVectorQueryBuilder.getTokenPruningConfig() + ), + ScoreMode.Max + ); + } + + @Override + public String getQueryName() { + return SparseVectorQueryBuilder.NAME; + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/index/query/SemanticMatchQueryRewriteInterceptorTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/index/query/SemanticMatchQueryRewriteInterceptorTests.java new file mode 100644 index 0000000000000..47705c14d5941 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/index/query/SemanticMatchQueryRewriteInterceptorTests.java @@ -0,0 +1,110 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.index.query; + +import org.elasticsearch.action.MockResolvedIndices; +import org.elasticsearch.action.OriginalIndices; +import org.elasticsearch.action.ResolvedIndices; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.InferenceFieldMetadata; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.plugins.internal.rewriter.QueryRewriteInterceptor; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.client.NoOpClient; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.xpack.inference.queries.SemanticMatchQueryRewriteInterceptor; +import org.elasticsearch.xpack.inference.queries.SemanticQueryBuilder; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.util.Map; + +public class SemanticMatchQueryRewriteInterceptorTests extends ESTestCase { + + private TestThreadPool threadPool; + private NoOpClient client; + private Index index; + + private static final String FIELD_NAME = "fieldName"; + private static final String VALUE = "value"; + + @Before + public void setup() { + threadPool = createThreadPool(); + client = new NoOpClient(threadPool); + index = new Index(randomAlphaOfLength(10), randomAlphaOfLength(10)); + } + + @After + public void cleanup() { + threadPool.close(); + } + + public void testMatchQueryOnInferenceFieldIsInterceptedAndRewrittenToSemanticQuery() throws IOException { + Map<String, InferenceFieldMetadata> inferenceFields = Map.of( + FIELD_NAME, + new InferenceFieldMetadata(index.getName(), "inferenceId", new String[] { FIELD_NAME }) + ); + QueryRewriteContext context = createQueryRewriteContext(inferenceFields); + QueryBuilder original = createTestQueryBuilder(); + QueryBuilder rewritten = original.rewrite(context); + assertTrue( + "Expected query to be intercepted, but was [" + rewritten.getClass().getName() + "]", + rewritten instanceof InterceptedQueryBuilderWrapper + ); + InterceptedQueryBuilderWrapper intercepted = (InterceptedQueryBuilderWrapper) rewritten; + assertTrue(intercepted.queryBuilder instanceof SemanticQueryBuilder); + SemanticQueryBuilder semanticQueryBuilder = (SemanticQueryBuilder) intercepted.queryBuilder; + assertEquals(FIELD_NAME, semanticQueryBuilder.getFieldName()); + assertEquals(VALUE, semanticQueryBuilder.getQuery()); + } + + public void testMatchQueryOnNonInferenceFieldRemainsMatchQuery() throws IOException { + QueryRewriteContext context = createQueryRewriteContext(Map.of()); // No inference fields + QueryBuilder original = createTestQueryBuilder(); + QueryBuilder rewritten = original.rewrite(context); + assertTrue( + "Expected query to remain match but was [" + rewritten.getClass().getName() + "]", + rewritten instanceof MatchQueryBuilder + ); + assertEquals(original, rewritten); + } + + private MatchQueryBuilder createTestQueryBuilder() { + return new MatchQueryBuilder(FIELD_NAME, VALUE); + } + + private QueryRewriteContext createQueryRewriteContext(Map<String, InferenceFieldMetadata> inferenceFields) { + IndexMetadata indexMetadata = IndexMetadata.builder(index.getName()) + .settings( + Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) + .put(IndexMetadata.SETTING_INDEX_UUID, index.getUUID()) + ) + .numberOfShards(1) + .numberOfReplicas(0) + .putInferenceFields(inferenceFields) + .build(); + + ResolvedIndices resolvedIndices = new
MockResolvedIndices( + Map.of(), + new OriginalIndices(new String[] { index.getName() }, IndicesOptions.DEFAULT), + Map.of(index, indexMetadata) + ); + + return new QueryRewriteContext(null, client, null, resolvedIndices, null, createRewriteInterceptor()); + } + + private QueryRewriteInterceptor createRewriteInterceptor() { + return new SemanticMatchQueryRewriteInterceptor(); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/index/query/SemanticSparseVectorQueryRewriteInterceptorTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/index/query/SemanticSparseVectorQueryRewriteInterceptorTests.java new file mode 100644 index 0000000000000..1adad1df7b29b --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/index/query/SemanticSparseVectorQueryRewriteInterceptorTests.java @@ -0,0 +1,137 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.index.query; + +import org.elasticsearch.action.MockResolvedIndices; +import org.elasticsearch.action.OriginalIndices; +import org.elasticsearch.action.ResolvedIndices; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.InferenceFieldMetadata; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.plugins.internal.rewriter.QueryRewriteInterceptor; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.client.NoOpClient; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.xpack.core.ml.search.SparseVectorQueryBuilder; +import org.elasticsearch.xpack.inference.mapper.SemanticTextField; +import org.elasticsearch.xpack.inference.queries.SemanticSparseVectorQueryRewriteInterceptor; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.util.Map; + +public class SemanticSparseVectorQueryRewriteInterceptorTests extends ESTestCase { + + private TestThreadPool threadPool; + private NoOpClient client; + private Index index; + + private static final String FIELD_NAME = "fieldName"; + private static final String INFERENCE_ID = "inferenceId"; + private static final String QUERY = "query"; + + @Before + public void setup() { + threadPool = createThreadPool(); + client = new NoOpClient(threadPool); + index = new Index(randomAlphaOfLength(10), randomAlphaOfLength(10)); + } + + @After + public void cleanup() { + threadPool.close(); + } + + public void testSparseVectorQueryOnInferenceFieldIsInterceptedAndRewritten() throws IOException { + Map<String, InferenceFieldMetadata> inferenceFields = Map.of( + FIELD_NAME, + new InferenceFieldMetadata(index.getName(), "inferenceId", new String[] { FIELD_NAME }) + ); + QueryRewriteContext context = createQueryRewriteContext(inferenceFields); + QueryBuilder original = new SparseVectorQueryBuilder(FIELD_NAME, INFERENCE_ID, QUERY); + QueryBuilder rewritten = original.rewrite(context); + assertTrue( + "Expected query to be intercepted, but was [" + rewritten.getClass().getName() + "]", + rewritten instanceof InterceptedQueryBuilderWrapper + ); + InterceptedQueryBuilderWrapper intercepted = (InterceptedQueryBuilderWrapper) rewritten; + assertTrue(intercepted.queryBuilder instanceof
NestedQueryBuilder); + NestedQueryBuilder nestedQueryBuilder = (NestedQueryBuilder) intercepted.queryBuilder; + assertEquals(SemanticTextField.getChunksFieldName(FIELD_NAME), nestedQueryBuilder.path()); + QueryBuilder innerQuery = nestedQueryBuilder.query(); + assertTrue(innerQuery instanceof SparseVectorQueryBuilder); + SparseVectorQueryBuilder sparseVectorQueryBuilder = (SparseVectorQueryBuilder) innerQuery; + assertEquals(SemanticTextField.getEmbeddingsFieldName(FIELD_NAME), sparseVectorQueryBuilder.getFieldName()); + assertEquals(INFERENCE_ID, sparseVectorQueryBuilder.getInferenceId()); + assertEquals(QUERY, sparseVectorQueryBuilder.getQuery()); + } + + public void testSparseVectorQueryOnInferenceFieldWithoutInferenceIdIsInterceptedAndRewritten() throws IOException { + Map<String, InferenceFieldMetadata> inferenceFields = Map.of( + FIELD_NAME, + new InferenceFieldMetadata(index.getName(), "inferenceId", new String[] { FIELD_NAME }) + ); + QueryRewriteContext context = createQueryRewriteContext(inferenceFields); + QueryBuilder original = new SparseVectorQueryBuilder(FIELD_NAME, null, QUERY); + QueryBuilder rewritten = original.rewrite(context); + assertTrue( + "Expected query to be intercepted, but was [" + rewritten.getClass().getName() + "]", + rewritten instanceof InterceptedQueryBuilderWrapper + ); + InterceptedQueryBuilderWrapper intercepted = (InterceptedQueryBuilderWrapper) rewritten; + assertTrue(intercepted.queryBuilder instanceof NestedQueryBuilder); + NestedQueryBuilder nestedQueryBuilder = (NestedQueryBuilder) intercepted.queryBuilder; + assertEquals(SemanticTextField.getChunksFieldName(FIELD_NAME), nestedQueryBuilder.path()); + QueryBuilder innerQuery = nestedQueryBuilder.query(); + assertTrue(innerQuery instanceof SparseVectorQueryBuilder); + SparseVectorQueryBuilder sparseVectorQueryBuilder = (SparseVectorQueryBuilder) innerQuery; + assertEquals(SemanticTextField.getEmbeddingsFieldName(FIELD_NAME), sparseVectorQueryBuilder.getFieldName()); + assertEquals(INFERENCE_ID, sparseVectorQueryBuilder.getInferenceId()); + assertEquals(QUERY, sparseVectorQueryBuilder.getQuery()); + } + + public void testSparseVectorQueryOnNonInferenceFieldRemainsUnchanged() throws IOException { + QueryRewriteContext context = createQueryRewriteContext(Map.of()); // No inference fields + QueryBuilder original = new SparseVectorQueryBuilder(FIELD_NAME, INFERENCE_ID, QUERY); + QueryBuilder rewritten = original.rewrite(context); + assertTrue( + "Expected query to remain sparse_vector but was [" + rewritten.getClass().getName() + "]", + rewritten instanceof SparseVectorQueryBuilder + ); + assertEquals(original, rewritten); + } + + private QueryRewriteContext createQueryRewriteContext(Map<String, InferenceFieldMetadata> inferenceFields) { + IndexMetadata indexMetadata = IndexMetadata.builder(index.getName()) + .settings( + Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) + .put(IndexMetadata.SETTING_INDEX_UUID, index.getUUID()) + ) + .numberOfShards(1) + .numberOfReplicas(0) + .putInferenceFields(inferenceFields) + .build(); + + ResolvedIndices resolvedIndices = new MockResolvedIndices( + Map.of(), + new OriginalIndices(new String[] { index.getName() }, IndicesOptions.DEFAULT), + Map.of(index, indexMetadata) + ); + + return new QueryRewriteContext(null, client, null, resolvedIndices, null, createRewriteInterceptor()); + } + + private QueryRewriteInterceptor createRewriteInterceptor() { + return new SemanticSparseVectorQueryRewriteInterceptor(); + } +} diff --git
a/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/46_semantic_text_sparse_vector.yml b/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/46_semantic_text_sparse_vector.yml new file mode 100644 index 0000000000000..f1cff512fd209 --- /dev/null +++ b/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/46_semantic_text_sparse_vector.yml @@ -0,0 +1,249 @@ +setup: + - requires: + cluster_features: "search.semantic_sparse_vector_query_rewrite_interception_supported" + reason: semantic_text sparse_vector support introduced in 8.18.0 + + - do: + inference.put: + task_type: sparse_embedding + inference_id: sparse-inference-id + body: > + { + "service": "test_service", + "service_settings": { + "model": "my_model", + "api_key": "abc64" + }, + "task_settings": { + } + } + + - do: + inference.put: + task_type: sparse_embedding + inference_id: sparse-inference-id-2 + body: > + { + "service": "test_service", + "service_settings": { + "model": "my_model", + "api_key": "abc64" + }, + "task_settings": { + } + } + + - do: + indices.create: + index: test-semantic-text-index + body: + mappings: + properties: + inference_field: + type: semantic_text + inference_id: sparse-inference-id + + - do: + indices.create: + index: test-semantic-text-index-2 + body: + mappings: + properties: + inference_field: + type: semantic_text + inference_id: sparse-inference-id-2 + + - do: + indices.create: + index: test-sparse-vector-index + body: + mappings: + properties: + inference_field: + type: sparse_vector + + - do: + index: + index: test-semantic-text-index + id: doc_1 + body: + inference_field: [ "inference test", "another inference test" ] + refresh: true + + - do: + index: + index: test-semantic-text-index-2 + id: doc_3 + body: + inference_field: [ "inference test", "another inference test" ] + refresh: true + + - do: + index: + index: test-sparse-vector-index + id: doc_2 + body: + inference_field: { "feature_0": 1, "feature_1": 2, "feature_2": 3, "feature_3": 4, "feature_4": 5 } + refresh: true + +--- +"Nested sparse_vector queries using the old format on semantic_text embeddings and inference still work": + - skip: + features: [ "headers" ] + + - do: + headers: + # Force JSON content type so that we use a parser that interprets the floating-point score as a double + Content-Type: application/json + search: + index: test-semantic-text-index + body: + query: + nested: + path: inference_field.inference.chunks + query: + sparse_vector: + field: inference_field.inference.chunks.embeddings + inference_id: sparse-inference-id + query: test + + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "doc_1" } + +--- +"Nested sparse_vector queries using the old format on semantic_text embeddings and query vectors still work": + - skip: + features: [ "headers" ] + + - do: + headers: + # Force JSON content type so that we use a parser that interprets the floating-point score as a double + Content-Type: application/json + search: + index: test-semantic-text-index + body: + query: + nested: + path: inference_field.inference.chunks + query: + sparse_vector: + field: inference_field.inference.chunks.embeddings + query_vector: { "feature_0": 1, "feature_1": 2, "feature_2": 3, "feature_3": 4, "feature_4": 5 } + + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "doc_1" } + +--- +"sparse_vector query against semantic_text field using a specified inference ID": + + - do: + search: + index: test-semantic-text-index + body: + 
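# This flat sparse_vector query on a semantic_text field is expected to be intercepted + # and rewritten into the nested form exercised by the old-format tests above. +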
query: + sparse_vector: + field: inference_field + inference_id: sparse-inference-id + query: "inference test" + + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "doc_1" } + +--- +"sparse_vector query against semantic_text field using inference ID configured in semantic_text field": + + - do: + search: + index: test-semantic-text-index + body: + query: + sparse_vector: + field: inference_field + query: "inference test" + + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "doc_1" } + +--- +"sparse_vector query against semantic_text field using query vectors": + + - do: + search: + index: test-semantic-text-index + body: + query: + sparse_vector: + field: inference_field + query_vector: { "feature_0": 1, "feature_1": 2, "feature_2": 3, "feature_3": 4, "feature_4": 5 } + + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "doc_1" } + +--- +"sparse_vector query against combined sparse_vector and semantic_text fields using inference": + + - do: + search: + index: + - test-semantic-text-index + - test-sparse-vector-index + body: + query: + sparse_vector: + field: inference_field + inference_id: sparse-inference-id + query: "inference test" + + - match: { hits.total.value: 2 } + +--- +"sparse_vector query against combined sparse_vector and semantic_text fields still requires inference ID": + + - do: + catch: bad_request + search: + index: + - test-semantic-text-index + - test-sparse-vector-index + body: + query: + sparse_vector: + field: inference_field + query: "inference test" + + - match: { error.type: "illegal_argument_exception" } + - match: { error.reason: "inference_id required to perform vector search on query string" } + +--- +"sparse_vector query against combined sparse_vector and semantic_text fields using query vectors": + + - do: + search: + index: + - test-semantic-text-index + - test-sparse-vector-index + body: + query: + sparse_vector: + field: inference_field + query_vector: { "feature_0": 1, "feature_1": 2, "feature_2": 3, "feature_3": 4, "feature_4": 5 } + + - match: { hits.total.value: 2 } + + +--- +"sparse_vector query against multiple semantic_text fields with multiple inference IDs specified in semantic_text fields": + + - do: + search: + index: + - test-semantic-text-index + - test-semantic-text-index-2 + body: + query: + sparse_vector: + field: inference_field + query: "inference test" + + - match: { hits.total.value: 2 } + diff --git a/x-pack/plugin/migrate/src/internalClusterTest/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamTransportActionIT.java b/x-pack/plugin/migrate/src/internalClusterTest/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamTransportActionIT.java index 6e24e644cb2af..b32a6efb854d7 100644 --- a/x-pack/plugin/migrate/src/internalClusterTest/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamTransportActionIT.java +++ b/x-pack/plugin/migrate/src/internalClusterTest/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamTransportActionIT.java @@ -29,7 +29,7 @@ import org.elasticsearch.xpack.migrate.MigratePlugin; import org.elasticsearch.xpack.migrate.action.ReindexDataStreamAction.ReindexDataStreamRequest; import org.elasticsearch.xpack.migrate.action.ReindexDataStreamAction.ReindexDataStreamResponse; -import org.elasticsearch.xpack.migrate.task.ReindexDataStreamStatus; +import org.elasticsearch.xpack.migrate.task.ReindexDataStreamEnrichedStatus; import org.elasticsearch.xpack.migrate.task.ReindexDataStreamTask; import java.util.Collection; @@ -37,6 +37,7 @@ import 
java.util.Locale; import java.util.Map; import java.util.Optional; +import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicReference; @@ -100,7 +101,7 @@ public void testAlreadyUpToDateDataStream() throws Exception { assertThat(task.getStatus().complete(), equalTo(true)); assertNull(task.getStatus().exception()); assertThat(task.getStatus().pending(), equalTo(0)); - assertThat(task.getStatus().inProgress(), equalTo(0)); + assertThat(task.getStatus().inProgress(), equalTo(Set.of())); assertThat(task.getStatus().errors().size(), equalTo(0)); assertBusy(() -> { @@ -108,12 +109,12 @@ public void testAlreadyUpToDateDataStream() throws Exception { new ActionType<GetMigrationReindexStatusAction.Response>(GetMigrationReindexStatusAction.NAME), new GetMigrationReindexStatusAction.Request(dataStreamName) ).actionGet(); - ReindexDataStreamStatus status = (ReindexDataStreamStatus) statusResponse.getTask().getTask().status(); + ReindexDataStreamEnrichedStatus status = statusResponse.getEnrichedStatus(); assertThat(status.complete(), equalTo(true)); assertThat(status.errors(), equalTo(List.of())); assertThat(status.exception(), equalTo(null)); assertThat(status.pending(), equalTo(0)); - assertThat(status.inProgress(), equalTo(0)); + assertThat(status.inProgress().size(), equalTo(0)); assertThat(status.totalIndices(), equalTo(backingIndexCount)); assertThat(status.totalIndicesToBeUpgraded(), equalTo(0)); }); diff --git a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/GetMigrationReindexStatusAction.java b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/GetMigrationReindexStatusAction.java index 68ccaef4bf02c..bc084f3e0b5d6 100644 --- a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/GetMigrationReindexStatusAction.java +++ b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/GetMigrationReindexStatusAction.java @@ -16,10 +16,9 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.tasks.Task; -import org.elasticsearch.tasks.TaskResult; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xpack.migrate.task.ReindexDataStreamEnrichedStatus; import java.io.IOException; import java.util.Objects; @@ -36,46 +35,43 @@ public GetMigrationReindexStatusAction() { } public static class Response extends ActionResponse implements ToXContentObject { - private final TaskResult task; + private final ReindexDataStreamEnrichedStatus enrichedStatus; - public Response(TaskResult task) { - this.task = requireNonNull(task, "task is required"); + public Response(ReindexDataStreamEnrichedStatus enrichedStatus) { + this.enrichedStatus = requireNonNull(enrichedStatus, "status is required"); } public Response(StreamInput in) throws IOException { super(in); - task = in.readOptionalWriteable(TaskResult::new); + enrichedStatus = in.readOptionalWriteable(ReindexDataStreamEnrichedStatus::new); } @Override public void writeTo(StreamOutput out) throws IOException { - out.writeOptionalWriteable(task); + out.writeOptionalWriteable(enrichedStatus); } /** * Get the actual result of the fetch.
*/ - public TaskResult getTask() { - return task; + public ReindexDataStreamEnrichedStatus getEnrichedStatus() { + return enrichedStatus; } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - Task.Status status = task.getTask().status(); - if (status != null) { - task.getTask().status().toXContent(builder, params); - } + enrichedStatus.toXContent(builder, params); return builder; } @Override public int hashCode() { - return Objects.hashCode(task); + return Objects.hashCode(enrichedStatus); } @Override public boolean equals(Object other) { - return other instanceof Response && task.equals(((Response) other).task); + return other instanceof Response && enrichedStatus.equals(((Response) other).enrichedStatus); } @Override diff --git a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/GetMigrationReindexStatusTransportAction.java b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/GetMigrationReindexStatusTransportAction.java index ca81a03fc5630..64864491191e5 100644 --- a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/GetMigrationReindexStatusTransportAction.java +++ b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/GetMigrationReindexStatusTransportAction.java @@ -11,40 +11,55 @@ import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListenerResponseHandler; +import org.elasticsearch.action.admin.indices.stats.IndexStats; +import org.elasticsearch.action.admin.indices.stats.IndicesStatsAction; +import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest; +import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.Strings; +import org.elasticsearch.core.Tuple; +import org.elasticsearch.index.shard.DocsStats; import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.persistent.AllocatedPersistentTask; import org.elasticsearch.persistent.PersistentTasksCustomMetadata; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskInfo; -import org.elasticsearch.tasks.TaskResult; import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.migrate.action.GetMigrationReindexStatusAction.Request; import org.elasticsearch.xpack.migrate.action.GetMigrationReindexStatusAction.Response; +import org.elasticsearch.xpack.migrate.task.ReindexDataStreamEnrichedStatus; +import org.elasticsearch.xpack.migrate.task.ReindexDataStreamStatus; +import java.util.HashMap; import java.util.Map; import java.util.Optional; +import java.util.Set; +import java.util.stream.Stream; public class GetMigrationReindexStatusTransportAction extends HandledTransportAction<Request, Response> { private final ClusterService clusterService; private final TransportService transportService; + private final Client client; @Inject public GetMigrationReindexStatusTransportAction( ClusterService clusterService,
TransportService transportService, - ActionFilters actionFilters + ActionFilters actionFilters, + Client client ) { super(GetMigrationReindexStatusAction.NAME, transportService, actionFilters, Request::new, EsExecutors.DIRECT_EXECUTOR_SERVICE); this.clusterService = clusterService; this.transportService = transportService; + this.client = client; } @Override @@ -60,9 +75,9 @@ protected void doExecute(Task task, Request request, ActionListener<Response> li } else if (persistentTask.isAssigned()) { String nodeId = persistentTask.getExecutorNode(); if (clusterService.localNode().getId().equals(nodeId)) { - getRunningTaskFromNode(persistentTaskId, listener); + fetchAndReportStatusForTaskOnThisNode(persistentTaskId, listener); } else { - runOnNodeWithTaskIfPossible(task, request, nodeId, listener); + fetchAndReportStatusForTaskOnRemoteNode(task, request, nodeId, listener); } } else { listener.onFailure(new ElasticsearchException("Persistent task with id [{}] is not assigned to a node", persistentTaskId)); @@ -82,7 +97,7 @@ private Task getRunningPersistentTaskFromTaskManager(String persistentTaskId) { return optionalTask.map(Map.Entry::getValue).orElse(null); } - void getRunningTaskFromNode(String persistentTaskId, ActionListener<Response> listener) { + void fetchAndReportStatusForTaskOnThisNode(String persistentTaskId, ActionListener<Response> listener) { Task runningTask = getRunningPersistentTaskFromTaskManager(persistentTaskId); if (runningTask == null) { listener.onFailure( @@ -96,11 +111,97 @@ void getRunningTaskFromNode(String persistentTaskId, ActionListener<Response> li ); } else { TaskInfo info = runningTask.taskInfo(clusterService.localNode().getId(), true); - listener.onResponse(new Response(new TaskResult(false, info))); + ReindexDataStreamStatus status = (ReindexDataStreamStatus) info.status(); + Set<String> inProgressIndices = status.inProgress(); + if (inProgressIndices.isEmpty()) { + // We have no reason to fetch index stats since there are no in-progress indices + reportStatus(Map.of(), status, listener); + } else { + fetchInProgressStatsAndReportStatus(inProgressIndices, status, listener); + } } } - private void runOnNodeWithTaskIfPossible(Task thisTask, Request request, String nodeId, ActionListener<Response> listener) { + /* + * The status is enriched with the information from inProgressMap to create a new ReindexDataStreamEnrichedStatus, which is used in the + * response sent to the listener. + */ + private void reportStatus( + Map<String, Tuple<Long, Long>> inProgressMap, + ReindexDataStreamStatus status, + ActionListener<Response> listener + ) { + ReindexDataStreamEnrichedStatus enrichedStatus = new ReindexDataStreamEnrichedStatus( + status.persistentTaskStartTime(), + status.totalIndices(), + status.totalIndicesToBeUpgraded(), + status.complete(), + status.exception(), + inProgressMap, + status.pending(), + status.errors() + ); + listener.onResponse(new Response(enrichedStatus)); + } + + /* + * This method fetches doc counts for all indices in inProgressIndices (and the indices they are being reindexed into). After + * successfully fetching those, reportStatus is called.
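+ * For example (hypothetical index name), if inProgressIndices is ["idx-1"], stats are requested + * for idx-1 and its generated destination index, and reportStatus then receives a map along the + * lines of {"idx-1" -> (total docs in idx-1, docs reindexed so far)}.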
+ */ + private void fetchInProgressStatsAndReportStatus( + Set<String> inProgressIndices, + ReindexDataStreamStatus status, + ActionListener<Response> listener + ) { + IndicesStatsRequest indicesStatsRequest = new IndicesStatsRequest(); + String[] indices = inProgressIndices.stream() + .flatMap(index -> Stream.of(index, ReindexDataStreamIndexTransportAction.generateDestIndexName(index))) + .toList() + .toArray(new String[0]); + indicesStatsRequest.indices(indices); + /* + * It is possible that the destination index will not exist yet, so we want to ignore the fact that it is missing + */ + indicesStatsRequest.indicesOptions(IndicesOptions.fromOptions(true, true, true, true)); + client.execute(IndicesStatsAction.INSTANCE, indicesStatsRequest, new ActionListener<IndicesStatsResponse>() { + @Override + public void onResponse(IndicesStatsResponse indicesStatsResponse) { + Map<String, Tuple<Long, Long>> inProgressMap = new HashMap<>(); + for (String index : inProgressIndices) { + IndexStats sourceIndexStats = indicesStatsResponse.getIndex(index); + final long totalDocsInIndex; + if (sourceIndexStats == null) { + totalDocsInIndex = 0; + } else { + DocsStats totalDocsStats = sourceIndexStats.getTotal().getDocs(); + totalDocsInIndex = totalDocsStats == null ? 0 : totalDocsStats.getCount(); + } + IndexStats migratedIndexStats = indicesStatsResponse.getIndex( + ReindexDataStreamIndexTransportAction.generateDestIndexName(index) + ); + final long reindexedDocsInIndex; + if (migratedIndexStats == null) { + reindexedDocsInIndex = 0; + } else { + DocsStats reindexedDocsStats = migratedIndexStats.getTotal().getDocs(); + reindexedDocsInIndex = reindexedDocsStats == null ? 0 : reindexedDocsStats.getCount(); + } + inProgressMap.put(index, Tuple.tuple(totalDocsInIndex, reindexedDocsInIndex)); + } + reportStatus(inProgressMap, status, listener); + } + + @Override + public void onFailure(Exception e) { + listener.onFailure(e); + } + }); + } + + /* + * The task and its status exist on some other node, so this method forwards the request to that node. + */ + private void fetchAndReportStatusForTaskOnRemoteNode(Task thisTask, Request request, String nodeId, ActionListener<Response> listener) { DiscoveryNode node = clusterService.state().nodes().get(nodeId); if (node == null) { listener.onFailure( diff --git a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamEnrichedStatus.java b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamEnrichedStatus.java new file mode 100644 index 0000000000000..9dbe1f0c8eebc --- /dev/null +++ b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamEnrichedStatus.java @@ -0,0 +1,104 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.migrate.task; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.core.Tuple; +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +/* + * This class represents information similar to that in ReindexDataStreamStatus, but enriched from other sources besides just the task + * itself.
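+ * In particular, the inProgress map enriches the raw status by pairing each in-progress source + * index name with a (total doc count, reindexed doc count) tuple gathered from index stats.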
+ */ +public record ReindexDataStreamEnrichedStatus( + long persistentTaskStartTime, + int totalIndices, + int totalIndicesToBeUpgraded, + boolean complete, + Exception exception, + Map<String, Tuple<Long, Long>> inProgress, + int pending, + List<Tuple<String, Exception>> errors +) implements ToXContentObject, Writeable { + public ReindexDataStreamEnrichedStatus { + Objects.requireNonNull(inProgress); + Objects.requireNonNull(errors); + } + + public ReindexDataStreamEnrichedStatus(StreamInput in) throws IOException { + this( + in.readLong(), + in.readInt(), + in.readInt(), + in.readBoolean(), + in.readException(), + in.readMap(StreamInput::readString, in2 -> Tuple.tuple(in2.readLong(), in2.readLong())), + in.readInt(), + in.readCollectionAsList(in1 -> Tuple.tuple(in1.readString(), in1.readException())) + ); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeLong(persistentTaskStartTime); + out.writeInt(totalIndices); + out.writeInt(totalIndicesToBeUpgraded); + out.writeBoolean(complete); + out.writeException(exception); + out.writeMap(inProgress, StreamOutput::writeString, (out2, tuple) -> { + out2.writeLong(tuple.v1()); + out2.writeLong(tuple.v2()); + }); + out.writeInt(pending); + out.writeCollection(errors, (out1, tuple) -> { + out1.writeString(tuple.v1()); + out1.writeException(tuple.v2()); + }); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.timestampFieldsFromUnixEpochMillis("start_time_millis", "start_time", persistentTaskStartTime); + builder.field("complete", complete); + builder.field("total_indices_in_data_stream", totalIndices); + builder.field("total_indices_requiring_upgrade", totalIndicesToBeUpgraded); + builder.field("successes", totalIndicesToBeUpgraded - (inProgress.size() + pending + errors.size())); + builder.startArray("in_progress"); + for (Map.Entry<String, Tuple<Long, Long>> inProgressEntry : inProgress.entrySet()) { + builder.startObject(); + builder.field("index", inProgressEntry.getKey()); + builder.field("total_doc_count", inProgressEntry.getValue().v1()); + builder.field("reindexed_doc_count", inProgressEntry.getValue().v2()); + builder.endObject(); + } + builder.endArray(); + builder.field("pending", pending); + builder.startArray("errors"); + for (Tuple<String, Exception> error : errors) { + builder.startObject(); + builder.field("index", error.v1()); + builder.field("message", error.v2() == null ?
"unknown" : error.v2().getMessage()); + builder.endObject(); + } + builder.endArray(); + if (exception != null) { + builder.field("exception", exception.getMessage()); + } + builder.endObject(); + return builder; + } +} diff --git a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamPersistentTaskExecutor.java b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamPersistentTaskExecutor.java index 176220a1ccae8..494be303980a7 100644 --- a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamPersistentTaskExecutor.java +++ b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamPersistentTaskExecutor.java @@ -78,9 +78,9 @@ protected void nodeOperation(AllocatedPersistentTask task, ReindexDataStreamTask .toList(); reindexDataStreamTask.setPendingIndicesCount(indicesToBeReindexed.size()); for (Index index : indicesToBeReindexed) { - reindexDataStreamTask.incrementInProgressIndicesCount(); + reindexDataStreamTask.incrementInProgressIndicesCount(index.getName()); // TODO This is just a placeholder. This is where the real data stream reindex logic will go - reindexDataStreamTask.reindexSucceeded(); + reindexDataStreamTask.reindexSucceeded(index.getName()); } completeSuccessfulPersistentTask(reindexDataStreamTask); diff --git a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamStatus.java b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamStatus.java index 358062550b50a..632aea076ea5a 100644 --- a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamStatus.java +++ b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamStatus.java @@ -16,6 +16,7 @@ import java.io.IOException; import java.util.List; import java.util.Objects; +import java.util.Set; public record ReindexDataStreamStatus( long persistentTaskStartTime, @@ -23,11 +24,12 @@ public record ReindexDataStreamStatus( int totalIndicesToBeUpgraded, boolean complete, Exception exception, - int inProgress, + Set inProgress, int pending, List> errors ) implements Task.Status { public ReindexDataStreamStatus { + Objects.requireNonNull(inProgress); Objects.requireNonNull(errors); } @@ -40,7 +42,7 @@ public ReindexDataStreamStatus(StreamInput in) throws IOException { in.readInt(), in.readBoolean(), in.readException(), - in.readInt(), + in.readCollectionAsSet((Reader) StreamInput::readString), in.readInt(), in.readCollectionAsList(in1 -> Tuple.tuple(in1.readString(), in1.readException())) ); @@ -58,7 +60,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeInt(totalIndicesToBeUpgraded); out.writeBoolean(complete); out.writeException(exception); - out.writeInt(inProgress); + out.writeStringCollection(inProgress); out.writeInt(pending); out.writeCollection(errors, (out1, tuple) -> { out1.writeString(tuple.v1()); @@ -69,12 +71,13 @@ public void writeTo(StreamOutput out) throws IOException { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); - builder.field("start_time", persistentTaskStartTime); + builder.timestampFieldsFromUnixEpochMillis("start_time_millis", "start_time", persistentTaskStartTime); builder.field("complete", complete); - builder.field("total_indices", totalIndices); + builder.field("total_indices_in_data_stream", totalIndices); 
builder.field("total_indices_requiring_upgrade", totalIndicesToBeUpgraded); - builder.field("successes", totalIndicesToBeUpgraded - (inProgress + pending + errors.size())); - builder.field("in_progress", inProgress); + final int inProgressSize = inProgress.size(); + builder.field("successes", totalIndicesToBeUpgraded - (inProgressSize + pending + errors.size())); + builder.field("in_progress", inProgressSize); builder.field("pending", pending); builder.startArray("errors"); for (Tuple error : errors) { diff --git a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamTask.java b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamTask.java index 844f24f45ab77..7a2b759dfd17a 100644 --- a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamTask.java +++ b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamTask.java @@ -15,8 +15,11 @@ import org.elasticsearch.threadpool.ThreadPool; import java.util.ArrayList; +import java.util.Collections; +import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; public class ReindexDataStreamTask extends AllocatedPersistentTask { @@ -26,9 +29,9 @@ public class ReindexDataStreamTask extends AllocatedPersistentTask { private final int totalIndicesToBeUpgraded; private volatile boolean complete = false; private volatile Exception exception; - private final AtomicInteger inProgress = new AtomicInteger(0); + private final Set inProgress = Collections.synchronizedSet(new HashSet<>()); private final AtomicInteger pending = new AtomicInteger(); - private final List> errors = new ArrayList<>(); + private final List> errors = Collections.synchronizedList(new ArrayList<>()); private final RunOnce completeTask; @SuppressWarnings("this-escape") @@ -64,7 +67,7 @@ public ReindexDataStreamStatus getStatus() { totalIndicesToBeUpgraded, complete, exception, - inProgress.get(), + inProgress, pending.get(), errors ); @@ -84,17 +87,17 @@ public void taskFailed(ThreadPool threadPool, TimeValue timeToLive, Exception e) allReindexesCompleted(threadPool, timeToLive); } - public void reindexSucceeded() { - inProgress.decrementAndGet(); + public void reindexSucceeded(String index) { + inProgress.remove(index); } public void reindexFailed(String index, Exception error) { this.errors.add(Tuple.tuple(index, error)); - inProgress.decrementAndGet(); + inProgress.remove(index); } - public void incrementInProgressIndicesCount() { - inProgress.incrementAndGet(); + public void incrementInProgressIndicesCount(String index) { + inProgress.add(index); pending.decrementAndGet(); } diff --git a/x-pack/plugin/migrate/src/test/java/org/elasticsearch/xpack/migrate/action/GetMigrationReindexStatusActionResponseTests.java b/x-pack/plugin/migrate/src/test/java/org/elasticsearch/xpack/migrate/action/GetMigrationReindexStatusActionResponseTests.java index a18030edbf42c..1361f30840c87 100644 --- a/x-pack/plugin/migrate/src/test/java/org/elasticsearch/xpack/migrate/action/GetMigrationReindexStatusActionResponseTests.java +++ b/x-pack/plugin/migrate/src/test/java/org/elasticsearch/xpack/migrate/action/GetMigrationReindexStatusActionResponseTests.java @@ -7,25 +7,17 @@ package org.elasticsearch.xpack.migrate.action; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import 
org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.network.NetworkModule; -import org.elasticsearch.tasks.RawTaskStatus; -import org.elasticsearch.tasks.Task; -import org.elasticsearch.tasks.TaskId; -import org.elasticsearch.tasks.TaskInfo; -import org.elasticsearch.tasks.TaskResult; +import org.elasticsearch.core.Tuple; import org.elasticsearch.test.AbstractWireSerializingTestCase; -import org.elasticsearch.xcontent.ToXContent; -import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.migrate.action.GetMigrationReindexStatusAction.Response; +import org.elasticsearch.xpack.migrate.task.ReindexDataStreamEnrichedStatus; import java.io.IOException; -import java.util.Collections; +import java.util.List; import java.util.Map; -import java.util.TreeMap; public class GetMigrationReindexStatusActionResponseTests extends AbstractWireSerializingTestCase<Response> { @Override @@ -35,11 +27,7 @@ protected Writeable.Reader<Response> instanceReader() { @Override protected Response createTestInstance() { - try { - return new Response(randomTaskResult()); - } catch (IOException e) { - throw new RuntimeException(e); - } + return new Response(getRandomStatus()); } @Override @@ -47,76 +35,44 @@ protected Response mutateInstance(Response instance) throws IOException { return createTestInstance(); // There's only one field } - private static TaskResult randomTaskResult() throws IOException { - return switch (between(0, 2)) { - case 0 -> new TaskResult(randomBoolean(), randomTaskInfo()); - case 1 -> new TaskResult(randomTaskInfo(), new RuntimeException("error")); - case 2 -> new TaskResult(randomTaskInfo(), randomTaskResponse()); - default -> throw new UnsupportedOperationException("Unsupported random TaskResult constructor"); - }; - } - - static TaskInfo randomTaskInfo() { - String nodeId = randomAlphaOfLength(5); - TaskId taskId = randomTaskId(nodeId); - String type = randomAlphaOfLength(5); - String action = randomAlphaOfLength(5); - Task.Status status = randomBoolean() ? randomRawTaskStatus() : null; - String description = randomBoolean() ? randomAlphaOfLength(5) : null; - long startTime = randomLong(); - long runningTimeNanos = randomNonNegativeLong(); - boolean cancellable = randomBoolean(); - boolean cancelled = cancellable && randomBoolean(); - TaskId parentTaskId = randomBoolean() ? TaskId.EMPTY_TASK_ID : randomTaskId(randomAlphaOfLength(5)); - Map<String, String> headers = randomBoolean() - ?
Collections.emptyMap() - : Collections.singletonMap(randomAlphaOfLength(5), randomAlphaOfLength(5)); - return new TaskInfo( - taskId, - type, - nodeId, - action, - description, - status, - startTime, - runningTimeNanos, - cancellable, - cancelled, - parentTaskId, - headers + private ReindexDataStreamEnrichedStatus getRandomStatus() { + return new ReindexDataStreamEnrichedStatus( + randomLong(), + randomNegativeInt(), + randomNegativeInt(), + randomBoolean(), + nullableTestException(), + randomInProgressMap(), + randomNegativeInt(), + randomErrorList() ); } - private static TaskId randomTaskId(String nodeId) { - return new TaskId(nodeId, randomLong()); + private Map<String, Tuple<Long, Long>> randomInProgressMap() { + return randomMap(1, 50, () -> Tuple.tuple(randomAlphaOfLength(50), Tuple.tuple(randomNonNegativeLong(), randomNonNegativeLong()))); } - private static RawTaskStatus randomRawTaskStatus() { - try (XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent())) { - builder.startObject(); - int fields = between(0, 10); - for (int f = 0; f < fields; f++) { - builder.field(randomAlphaOfLength(5), randomAlphaOfLength(5)); - } - builder.endObject(); - return new RawTaskStatus(BytesReference.bytes(builder)); - } catch (IOException e) { - throw new IllegalStateException(e); + private Exception nullableTestException() { + if (randomBoolean()) { + return testException(); } + return null; } - private static ToXContent randomTaskResponse() { - Map<String, String> result = new TreeMap<>(); - int fields = between(0, 10); - for (int f = 0; f < fields; f++) { - result.put(randomAlphaOfLength(5), randomAlphaOfLength(5)); - } - return (builder, params) -> { - for (Map.Entry<String, String> entry : result.entrySet()) { - builder.field(entry.getKey(), entry.getValue()); - } - return builder; - }; + private Exception testException() { + /* + * Unfortunately ElasticsearchException doesn't have an equals and just falls back to Object::equals. So we can't test for equality + * when we're using an exception. So always just use null. + */ + return null; + } + + private List<Tuple<String, Exception>> randomErrorList() { + return randomErrorList(0); + } + + private List<Tuple<String, Exception>> randomErrorList(int minSize) { + return randomList(minSize, Math.max(minSize, 100), () -> Tuple.tuple(randomAlphaOfLength(30), testException())); } @Override diff --git a/x-pack/plugin/migrate/src/test/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamEnrichedStatusTests.java b/x-pack/plugin/migrate/src/test/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamEnrichedStatusTests.java new file mode 100644 index 0000000000000..acd8cd1a6add2 --- /dev/null +++ b/x-pack/plugin/migrate/src/test/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamEnrichedStatusTests.java @@ -0,0 +1,165 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0.
+ */ + +package org.elasticsearch.xpack.migrate.task; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.core.Tuple; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.json.JsonXContent; + +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import static java.util.Map.entry; +import static org.elasticsearch.xcontent.ToXContent.EMPTY_PARAMS; +import static org.hamcrest.Matchers.equalTo; + +public class ReindexDataStreamEnrichedStatusTests extends AbstractWireSerializingTestCase<ReindexDataStreamEnrichedStatus> { + + @Override + protected Writeable.Reader<ReindexDataStreamEnrichedStatus> instanceReader() { + return ReindexDataStreamEnrichedStatus::new; + } + + @Override + protected ReindexDataStreamEnrichedStatus createTestInstance() { + return new ReindexDataStreamEnrichedStatus( + randomLong(), + randomNegativeInt(), + randomNegativeInt(), + randomBoolean(), + nullableTestException(), + randomInProgressMap(), + randomNegativeInt(), + randomErrorList() + ); + } + + private Map<String, Tuple<Long, Long>> randomInProgressMap() { + return randomMap(1, 50, () -> Tuple.tuple(randomAlphaOfLength(50), Tuple.tuple(randomNonNegativeLong(), randomNonNegativeLong()))); + } + + private Exception nullableTestException() { + if (randomBoolean()) { + return testException(); + } + return null; + } + + private Exception testException() { + /* + * Unfortunately ElasticsearchException doesn't have an equals and just falls back to Object::equals. So we can't test for equality + * when we're using an exception. So always just use null.
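+ * (If exception equality ever becomes important here, comparing messages via getMessage(), as + * the toXContent test below effectively does, would be a workable alternative.)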
+ */ + return null; + } + + private List<String> randomList() { + return randomList(0); + } + + private List<String> randomList(int minSize) { + return randomList(minSize, Math.max(minSize, 100), () -> randomAlphaOfLength(50)); + } + + private Set<String> randomSet(int minSize) { + return randomSet(minSize, 100, () -> randomAlphaOfLength(50)); + } + + private List<Tuple<String, Exception>> randomErrorList() { + return randomErrorList(0); + } + + private List<Tuple<String, Exception>> randomErrorList(int minSize) { + return randomList(minSize, Math.max(minSize, 100), () -> Tuple.tuple(randomAlphaOfLength(30), testException())); + } + + @Override + protected ReindexDataStreamEnrichedStatus mutateInstance(ReindexDataStreamEnrichedStatus instance) throws IOException { + long startTime = instance.persistentTaskStartTime(); + int totalIndices = instance.totalIndices(); + int totalIndicesToBeUpgraded = instance.totalIndicesToBeUpgraded(); + boolean complete = instance.complete(); + Exception exception = instance.exception(); + Map<String, Tuple<Long, Long>> inProgress = instance.inProgress(); + int pending = instance.pending(); + List<Tuple<String, Exception>> errors = instance.errors(); + switch (randomIntBetween(0, 6)) { + case 0 -> startTime = randomLong(); + case 1 -> totalIndices = totalIndices + 1; + case 2 -> totalIndicesToBeUpgraded = totalIndicesToBeUpgraded + 1; + case 3 -> complete = complete == false; + case 4 -> inProgress = randomInProgressMap(); + case 5 -> pending = pending + 1; + case 6 -> errors = randomErrorList(errors.size() + 1); + default -> throw new UnsupportedOperationException(); + } + return new ReindexDataStreamEnrichedStatus( + startTime, + totalIndices, + totalIndicesToBeUpgraded, + complete, + exception, + inProgress, + pending, + errors + ); + } + + public void testToXContent() throws IOException { + ReindexDataStreamEnrichedStatus status = new ReindexDataStreamEnrichedStatus( + 1234L, + 200, + 100, + true, + new ElasticsearchException("the whole task failed"), + Map.of("index-1", Tuple.tuple(10L, 8L)), + 8, + List.of( + Tuple.tuple("index7", new ElasticsearchException("index7 failed")), + Tuple.tuple("index8", new ElasticsearchException("index8 " + "failed")) + ) + ); + try (XContentBuilder builder = XContentBuilder.builder(JsonXContent.jsonXContent)) { + builder.humanReadable(true); + status.toXContent(builder, EMPTY_PARAMS); + try (XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder))) { + Map<String, Object> parserMap = parser.map(); + assertThat( + parserMap, + equalTo( + Map.ofEntries( + entry("start_time", "1970-01-01T00:00:01.234Z"), + entry("start_time_millis", 1234), + entry("total_indices_in_data_stream", 200), + entry("total_indices_requiring_upgrade", 100), + entry("complete", true), + entry("exception", "the whole task failed"), + entry("successes", 89), + entry("in_progress", List.of(Map.of("index", "index-1", "total_doc_count", 10, "reindexed_doc_count", 8))), + entry("pending", 8), + entry( + "errors", + List.of( + Map.of("index", "index7", "message", "index7 failed"), + Map.of("index", "index8", "message", "index8 failed") + ) + ) + ) + ) + ); + } + } + } +} diff --git a/x-pack/plugin/migrate/src/test/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamStatusTests.java b/x-pack/plugin/migrate/src/test/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamStatusTests.java index d81e9d35cd490..47e2d02bee3b0 100644 --- a/x-pack/plugin/migrate/src/test/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamStatusTests.java +++ b/x-pack/plugin/migrate/src/test/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamStatusTests.java
@@ -19,6 +19,7 @@ import java.io.IOException; import java.util.List; import java.util.Map; +import java.util.Set; import static java.util.Map.entry; import static org.elasticsearch.xcontent.ToXContent.EMPTY_PARAMS; @@ -39,7 +40,7 @@ protected ReindexDataStreamStatus createTestInstance() { randomNegativeInt(), randomBoolean(), nullableTestException(), - randomNegativeInt(), + randomSet(0), randomNegativeInt(), randomErrorList() ); @@ -68,6 +69,10 @@ private List<String> randomList(int minSize) { return randomList(minSize, Math.max(minSize, 100), () -> randomAlphaOfLength(50)); } + private Set<String> randomSet(int minSize) { + return randomSet(minSize, 100, () -> randomAlphaOfLength(50)); + } + private List<Tuple<String, Exception>> randomErrorList() { return randomErrorList(0); } @@ -83,7 +88,7 @@ protected ReindexDataStreamStatus mutateInstance(ReindexDataStreamStatus instanc int totalIndicesToBeUpgraded = instance.totalIndicesToBeUpgraded(); boolean complete = instance.complete(); Exception exception = instance.exception(); - int inProgress = instance.inProgress(); + Set<String> inProgress = instance.inProgress(); int pending = instance.pending(); List<Tuple<String, Exception>> errors = instance.errors(); switch (randomIntBetween(0, 6)) { @@ -91,7 +96,7 @@ protected ReindexDataStreamStatus mutateInstance(ReindexDataStreamStatus instanc case 1 -> totalIndices = totalIndices + 1; case 2 -> totalIndicesToBeUpgraded = totalIndicesToBeUpgraded + 1; case 3 -> complete = complete == false; - case 4 -> inProgress = inProgress + 1; + case 4 -> inProgress = randomSet(inProgress.size() + 1); case 5 -> pending = pending + 1; case 6 -> errors = randomErrorList(errors.size() + 1); default -> throw new UnsupportedOperationException(); @@ -115,7 +120,7 @@ public void testToXContent() throws IOException { 100, true, new ElasticsearchException("the whole task failed"), - 12, + randomSet(12, 12, () -> randomAlphaOfLength(50)), 8, List.of( Tuple.tuple("index7", new ElasticsearchException("index7 failed")), @@ -131,8 +136,9 @@ parserMap, equalTo( Map.ofEntries( - entry("start_time", 1234), - entry("total_indices", 200), + entry("start_time", "1970-01-01T00:00:01.234Z"), + entry("start_time_millis", 1234), + entry("total_indices_in_data_stream", 200), entry("total_indices_requiring_upgrade", 100), entry("complete", true), entry("exception", "the whole task failed"), diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobRenormalizedResultsPersister.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobRenormalizedResultsPersister.java index 3c0d2aca4deda..3c82841f1b99e 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobRenormalizedResultsPersister.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobRenormalizedResultsPersister.java @@ -8,10 +8,12 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.client.internal.Client; +import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; @@ -102,7 +104,29 @@ public void executeRequest() { try (ThreadContext.StoredContext ignore =
+                for (BulkItemResponse response : addRecordsResponse.getItems()) {
+                    if (response.isFailed() == false) {
+                        continue;
+                    }
+                    if (response.getFailureMessage().contains(IndexMetadata.INDEX_READ_ONLY_BLOCK.description())) {
+                        // We expect this to happen when the old index is made read-only and is being reindexed
+                        logger.debug(
+                            "[{}] Ignoring failure to write renormalized results to a read-only index [{}]: {}",
+                            jobId,
+                            response.getFailure().getIndex(),
+                            response.getFailureMessage()
+                        );
+                    } else {
+                        hasNonReadOnlyFailures = true;
+                        break;
+                    }
+                }
+                if (hasNonReadOnlyFailures) {
+                    logger.error("[{}] Bulk index of results has errors: {}", jobId, addRecordsResponse.buildFailureMessage());
+                }
             }
         }
diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java
index f4d50df4ff613..35da4abec223a 100644
--- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java
+++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java
@@ -462,6 +462,7 @@ public void testToXContent() throws IOException {
             pluginEsBuildVersion,
             Version.CURRENT,
             IndexVersions.MINIMUM_COMPATIBLE,
+            IndexVersions.MINIMUM_READONLY_COMPATIBLE,
             IndexVersion.current(),
             apmIndicesExist };
         final String expectedJson = """
@@ -817,6 +818,7 @@ public void testToXContent() throws IOException {
                       ],
                       "version": "%s",
                       "min_index_version":%s,
+                      "min_read_only_index_version":%s,
                       "max_index_version":%s
                     }
                   },
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/role/RestQueryRoleAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/role/RestQueryRoleAction.java
index 3637159479463..862ff2552b4e3 100644
--- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/role/RestQueryRoleAction.java
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/role/RestQueryRoleAction.java
@@ -34,7 +34,7 @@
 import static org.elasticsearch.rest.RestRequest.Method.POST;
 import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg;
 
-@ServerlessScope(Scope.INTERNAL)
+@ServerlessScope(Scope.PUBLIC)
 public final class RestQueryRoleAction extends NativeRoleBaseRestHandler {
 
     @SuppressWarnings("unchecked")
diff --git a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleServiceTests.java b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleServiceTests.java
index 36887681f5575..9955fe4cf0f95 100644
--- a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleServiceTests.java
+++ b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleServiceTests.java
@@ -529,6 +529,7 @@ public void testValidateIntervalScheduleSupport() {
         var featureService = new FeatureService(List.of(new SnapshotLifecycleFeatures()));
         {
             ClusterState state = ClusterState.builder(new ClusterName("cluster"))
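+                // Register nodes "a" and "b" so the node-features map below refers to nodes present in the cluster state.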
+                .nodes(DiscoveryNodes.builder().add(DiscoveryNodeUtils.create("a")).add(DiscoveryNodeUtils.create("b")))
                 .nodeFeatures(Map.of("a", Set.of(), "b", Set.of(SnapshotLifecycleService.INTERVAL_SCHEDULE.id())))
                 .build();
 
@@ -540,6 +541,7 @@ public void testValidateIntervalScheduleSupport() {
         }
         {
             ClusterState state = ClusterState.builder(new ClusterName("cluster"))
+                .nodes(DiscoveryNodes.builder().add(DiscoveryNodeUtils.create("a")))
                 .nodeFeatures(Map.of("a", Set.of(SnapshotLifecycleService.INTERVAL_SCHEDULE.id())))
                 .build();
             try {
@@ -550,6 +552,7 @@ public void testValidateIntervalScheduleSupport() {
         }
         {
             ClusterState state = ClusterState.builder(new ClusterName("cluster"))
+                .nodes(DiscoveryNodes.builder().add(DiscoveryNodeUtils.create("a")).add(DiscoveryNodeUtils.create("b")))
                 .nodeFeatures(Map.of("a", Set.of(), "b", Set.of(SnapshotLifecycleService.INTERVAL_SCHEDULE.id())))
                 .build();
             try {
diff --git a/x-pack/plugin/sql/qa/server/src/main/resources/docs/docs.csv-spec b/x-pack/plugin/sql/qa/server/src/main/resources/docs/docs.csv-spec
index 60e81be43cc96..2fa82c05cc1aa 100644
--- a/x-pack/plugin/sql/qa/server/src/main/resources/docs/docs.csv-spec
+++ b/x-pack/plugin/sql/qa/server/src/main/resources/docs/docs.csv-spec
@@ -3353,7 +3353,7 @@
 Alejandro
 Amabile
 Anoosh
 Basil
-Bojan
+Brendon
 // end::filterToday
 ;
diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/180_match_operator.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/180_match_operator.yml
index 663c0dc78acb3..118783b412d48 100644
--- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/180_match_operator.yml
+++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/180_match_operator.yml
@@ -170,7 +170,7 @@ setup:
   - match: { error.reason: "Found 1 problem\nline 1:36: Unknown column [content], did you mean [count(*)]?" }
 
 ---
-"match with functions":
+"match with disjunctions":
   - do:
       catch: bad_request
      allowed_warnings_regex:
@@ -181,7 +181,20 @@ setup:
 
   - match: { status: 400 }
   - match: { error.type: verification_exception }
-  - match: { error.reason: "Found 1 problem\nline 1:19: Invalid condition [content:\"fox\" OR to_upper(content) == \"FOX\"]. [:] operator can't be used as part of an or condition" }
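+  # Match the error loosely: only the core "Invalid condition [...]" part of the message is asserted, via regex.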
+  - match: { error.reason: "/.+Invalid\\ condition\\ \\[content\\:\"fox\"\\ OR\\ to_upper\\(content\\)\\ ==\\ \"FOX\"\\]\\./" }
+
+  - do:
+      catch: bad_request
+      allowed_warnings_regex:
+        - "No limit defined, adding default limit of \\[.*\\]"
+      esql.query:
+        body:
+          query: 'FROM test | WHERE content:"fox" OR to_upper(content) == "FOX"'
+
+  - match: { status: 400 }
+  - match: { error.type: verification_exception }
+  - match: { error.reason: "/.+Invalid\\ condition\\ \\[content\\:\"fox\"\\ OR\\ to_upper\\(content\\)\\ ==\\ \"FOX\"\\]\\./" }
+
 
 ---
 "match within eval":
diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/60_usage.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/60_usage.yml
index 2a4cde9a680e9..b6d75048591e5 100644
--- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/60_usage.yml
+++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/60_usage.yml
@@ -92,7 +92,7 @@ setup:
     - gt: {esql.functions.to_long: $functions_to_long}
     - match: {esql.functions.coalesce: $functions_coalesce}
     # Testing for the entire function set isn't feasible, so we just check that we return the correct count as an approximation.
-    - length: {esql.functions: 129} # check the "sister" test below for a likely update to the same esql.functions length check
+    - length: {esql.functions: 130} # check the "sister" test below for a likely update to the same esql.functions length check
 
 ---
 "Basic ESQL usage output (telemetry) non-snapshot version":
@@ -163,4 +163,4 @@ setup:
     - match: {esql.functions.cos: $functions_cos}
     - gt: {esql.functions.to_long: $functions_to_long}
     - match: {esql.functions.coalesce: $functions_coalesce}
-    - length: {esql.functions: 125} # check the "sister" test above for a likely update to the same esql.functions length check
+    - length: {esql.functions: 126} # check the "sister" test above for a likely update to the same esql.functions length check
diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/migrate/20_reindex_status.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/migrate/20_reindex_status.yml
index c94ce8dd211ae..80ca95d631491 100644
--- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/migrate/20_reindex_status.yml
+++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/migrate/20_reindex_status.yml
@@ -61,10 +61,10 @@ setup:
   - do:
       migrate.get_reindex_status:
         index: "my-data-stream"
   - match: { complete: true }
-  - match: { total_indices: 1 }
+  - match: { total_indices_in_data_stream: 1 }
   - match: { total_indices_requiring_upgrade: 0 }
   - match: { successes: 0 }
-  - match: { in_progress: 0 }
+  - match: { in_progress: [] }
   - match: { pending: 0 }
   - match: { errors: [] }
diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/sparse_vector_search.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/sparse_vector_search.yml
index 332981a580802..3481773b0bab3 100644
--- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/sparse_vector_search.yml
+++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/sparse_vector_search.yml
@@ -268,7 +268,7 @@ setup:
   - match: { hits.hits.0._score: 0.25 }
 
 ---
-"Test sparse_vector requires one of inference_id or query_vector":
+"Test sparse_vector requires one of query or query_vector":
   - do:
       catch: /\[sparse_vector\] requires one of \[query_vector\] or \[inference_id\]/
      search:
@@ -281,7 +281,41 @@ setup:
   - match: { status: 400 }
 
 ---
-"Test sparse_vector only allows one of inference_id or query_vector":
+"Test sparse_vector returns an error if inference ID not specified with query":
+  - do:
+      catch: bad_request # This is for BWC; the actual error message is tested in the subsequent test
+      search:
+        index: index-with-sparse-vector
+        body:
+          query:
+            sparse_vector:
+              field: text
+              query: "octopus comforter smells"
+
+  - match: { status: 400 }
+
+---
+"Test sparse_vector requires an inference ID to be specified on sparse_vector fields":
+  - requires:
+      cluster_features: [ "search.semantic_sparse_vector_query_rewrite_interception_supported" ]
+      reason: "Error message changed in 8.18"
+  - do:
+      catch: /inference_id required to perform vector search on query string/
+      search:
+        index: index-with-sparse-vector
+        body:
+          query:
+            sparse_vector:
+              field: text
+              query: "octopus comforter smells"
+
+  - match: { status: 400 }
+
+---
+# Note: the server error still reports [query_vector] or [inference_id], which is misleading for this case.
+"Test sparse_vector only allows one of query or query_vector":
+  - requires:
+      cluster_features: [ "search.semantic_sparse_vector_query_rewrite_interception_supported" ]
+      reason: "sparse vector inference checks updated in 8.18 to support sparse_vector on semantic_text fields"
   - do:
       catch: /\[sparse_vector\] requires one of \[query_vector\] or \[inference_id\]/
       search:
@@ -290,7 +324,7 @@ setup:
           query:
             sparse_vector:
               field: text
-              inference_id: text_expansion_model
+              query: "octopus comforter smells"
               query_vector:
                 the: 1.0
                 comforter: 1.0