diff --git a/.buildkite/pipelines/intake.yml b/.buildkite/pipelines/intake.yml index 6e15d64154960..9efb9c8b498aa 100644 --- a/.buildkite/pipelines/intake.yml +++ b/.buildkite/pipelines/intake.yml @@ -56,7 +56,7 @@ steps: timeout_in_minutes: 300 matrix: setup: - BWC_VERSION: ["8.16.2", "8.17.1", "8.18.0", "9.0.0"] + BWC_VERSION: ["8.16.3", "8.17.1", "8.18.0", "9.0.0"] agents: provider: gcp image: family/elasticsearch-ubuntu-2004 diff --git a/.buildkite/pipelines/periodic-packaging.yml b/.buildkite/pipelines/periodic-packaging.yml index abd11068e7a65..b1e5a7bf933c9 100644 --- a/.buildkite/pipelines/periodic-packaging.yml +++ b/.buildkite/pipelines/periodic-packaging.yml @@ -287,8 +287,8 @@ steps: env: BWC_VERSION: 8.15.5 - - label: "{{matrix.image}} / 8.16.2 / packaging-tests-upgrade" - command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.16.2 + - label: "{{matrix.image}} / 8.16.3 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.16.3 timeout_in_minutes: 300 matrix: setup: @@ -301,7 +301,7 @@ steps: machineType: custom-16-32768 buildDirectory: /dev/shm/bk env: - BWC_VERSION: 8.16.2 + BWC_VERSION: 8.16.3 - label: "{{matrix.image}} / 8.17.1 / packaging-tests-upgrade" command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.17.1 diff --git a/.buildkite/pipelines/periodic-platform-support.yml b/.buildkite/pipelines/periodic-platform-support.yml index c5846a763f5e8..ea0d7b13b55b4 100644 --- a/.buildkite/pipelines/periodic-platform-support.yml +++ b/.buildkite/pipelines/periodic-platform-support.yml @@ -63,6 +63,7 @@ steps: image: - almalinux-8-aarch64 - ubuntu-2004-aarch64 + - ubuntu-2404-aarch64 GRADLE_TASK: - checkPart1 - checkPart2 diff --git a/.buildkite/pipelines/periodic.yml b/.buildkite/pipelines/periodic.yml index f2d169cd2b30d..4c593bae62d7a 100644 --- a/.buildkite/pipelines/periodic.yml +++ b/.buildkite/pipelines/periodic.yml @@ -306,8 +306,8 @@ steps: - signal_reason: agent_stop limit: 3 - - label: 8.16.2 / bwc - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.16.2#bwcTest + - label: 8.16.3 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.16.3#bwcTest timeout_in_minutes: 300 agents: provider: gcp @@ -316,7 +316,7 @@ steps: buildDirectory: /dev/shm/bk preemptible: true env: - BWC_VERSION: 8.16.2 + BWC_VERSION: 8.16.3 retry: automatic: - exit_status: "-1" @@ -448,7 +448,7 @@ steps: setup: ES_RUNTIME_JAVA: - openjdk21 - BWC_VERSION: ["8.16.2", "8.17.1", "8.18.0", "9.0.0"] + BWC_VERSION: ["8.16.3", "8.17.1", "8.18.0", "9.0.0"] agents: provider: gcp image: family/elasticsearch-ubuntu-2004 @@ -490,7 +490,7 @@ steps: ES_RUNTIME_JAVA: - openjdk21 - openjdk23 - BWC_VERSION: ["8.16.2", "8.17.1", "8.18.0", "9.0.0"] + BWC_VERSION: ["8.16.3", "8.17.1", "8.18.0", "9.0.0"] agents: provider: gcp image: family/elasticsearch-ubuntu-2004 diff --git a/.ci/bwcVersions b/.ci/bwcVersions index 3cb983373138f..cf12ee8c15419 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -15,7 +15,7 @@ BWC_VERSION: - "8.13.4" - "8.14.3" - "8.15.5" - - "8.16.2" + - "8.16.3" - "8.17.1" - "8.18.0" - "9.0.0" diff --git a/.ci/snapshotBwcVersions b/.ci/snapshotBwcVersions index e05c0774c9819..68c6ad5601546 100644 --- a/.ci/snapshotBwcVersions +++ b/.ci/snapshotBwcVersions @@ -1,5 +1,5 @@ BWC_VERSION: - - "8.16.2" + - "8.16.3" - "8.17.1" - "8.18.0" - "9.0.0" diff --git a/.gitignore b/.gitignore index 
d1af97cbaea3b..8b2da4dc0832a 100644
--- a/.gitignore
+++ b/.gitignore
@@ -69,3 +69,6 @@ testfixtures_shared/
# Generated
checkstyle_ide.xml
x-pack/plugin/esql/src/main/generated-src/generated/
+
+# JEnv
+.java-version
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/DockerBase.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/DockerBase.java
index d54eb798ce783..985c98bcd7883 100644
--- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/DockerBase.java
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/DockerBase.java
@@ -22,7 +22,7 @@ public enum DockerBase {
    // Chainguard based wolfi image with latest jdk
    // This is usually updated via renovatebot
    // spotless:off
-   WOLFI("docker.elastic.co/wolfi/chainguard-base:latest@sha256:1b51ff6dba78c98d3e02b0cd64a8ce3238c7a40408d21e3af12a329d44db6f23",
+   WOLFI("docker.elastic.co/wolfi/chainguard-base:latest@sha256:bfdeddb33330a281950c2a54adef991dbbe6a42832bc505d13b11beaf50ae73f",
        "-wolfi",
        "apk"
    ),
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditPrecommitPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditPrecommitPlugin.java
index e45a1d3dd25b1..7046a22204efa 100644
--- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditPrecommitPlugin.java
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditPrecommitPlugin.java
@@ -16,12 +16,14 @@
import org.gradle.api.Task;
import org.gradle.api.artifacts.Configuration;
import org.gradle.api.artifacts.component.ModuleComponentIdentifier;
+import org.gradle.api.file.FileCollection;
import org.gradle.api.tasks.TaskProvider;
import java.io.File;
import java.nio.file.Path;
import static org.elasticsearch.gradle.internal.util.DependenciesUtils.createFileCollectionFromNonTransitiveArtifactsView;
+import static org.elasticsearch.gradle.internal.util.DependenciesUtils.thirdPartyDependenciesView;
import static org.elasticsearch.gradle.internal.util.ParamsUtils.loadBuildParams;
public class ThirdPartyAuditPrecommitPlugin extends PrecommitPlugin {
@@ -47,7 +49,6 @@ public TaskProvider<? extends Task> createTask(Project project) {
            project.getDependencies().add(JDK_JAR_HELL_CONFIG_NAME, elasticsearchCoreProject);
        }
    }
-
    TaskProvider<ExportElasticsearchBuildResourcesTask> resourcesTask = project.getTasks()
        .register("thirdPartyAuditResources", ExportElasticsearchBuildResourcesTask.class);
    Path resourcesDir = project.getBuildDir().toPath().resolve("third-party-audit-config");
@@ -59,9 +60,11 @@ public TaskProvider<? extends Task> createTask(Project project) {
    // usually only one task is created.
but this construct makes our integTests easier to set up
    project.getTasks().withType(ThirdPartyAuditTask.class).configureEach(t -> {
        Configuration runtimeConfiguration = project.getConfigurations().getByName("runtimeClasspath");
+       FileCollection runtimeThirdParty = thirdPartyDependenciesView(runtimeConfiguration);
        Configuration compileOnly = project.getConfigurations()
            .getByName(CompileOnlyResolvePlugin.RESOLVEABLE_COMPILE_ONLY_CONFIGURATION_NAME);
-       t.setClasspath(runtimeConfiguration.plus(compileOnly));
+       FileCollection compileOnlyThirdParty = thirdPartyDependenciesView(compileOnly);
+       t.getThirdPartyClasspath().from(runtimeThirdParty, compileOnlyThirdParty);
        t.getJarsToScan()
            .from(
                createFileCollectionFromNonTransitiveArtifactsView(
@@ -78,7 +81,7 @@ public TaskProvider<? extends Task> createTask(Project project) {
        t.getJavaHome().set(buildParams.flatMap(params -> params.getRuntimeJavaHome()).map(File::getPath));
        t.setSignatureFile(resourcesDir.resolve("forbidden/third-party-audit.txt").toFile());
        t.getJdkJarHellClasspath().from(jdkJarHellConfig);
-       t.getForbiddenAPIsClasspath().from(project.getConfigurations().getByName("forbiddenApisCliJar").plus(compileOnly));
+       t.getForbiddenAPIsClasspath().from(project.getConfigurations().getByName("forbiddenApisCliJar").plus(compileOnlyThirdParty));
    });
    return audit;
}
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditTask.java
index 442797775de2f..59ba9bae0a57d 100644
--- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditTask.java
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditTask.java
@@ -17,7 +17,6 @@
import org.gradle.api.JavaVersion;
import org.gradle.api.file.ArchiveOperations;
import org.gradle.api.file.ConfigurableFileCollection;
-import org.gradle.api.file.FileCollection;
import org.gradle.api.file.FileSystemOperations;
import org.gradle.api.file.FileTree;
import org.gradle.api.file.ProjectLayout;
@@ -96,8 +95,6 @@ public abstract class ThirdPartyAuditTask extends DefaultTask {
    private final ProjectLayout projectLayout;
-   private FileCollection classpath;
-
    @Inject
    public ThirdPartyAuditTask(
        ArchiveOperations archiveOperations,
@@ -198,9 +195,7 @@ public Set<String> getMissingClassExcludes() {
    public abstract Property<JavaVersion> getRuntimeJavaVersion();
    @Classpath
-   public FileCollection getClasspath() {
-       return classpath;
-   }
+   public abstract ConfigurableFileCollection getThirdPartyClasspath();
    @TaskAction
    public void runThirdPartyAudit() throws IOException {
@@ -345,7 +340,7 @@ private String runForbiddenAPIsCli() throws IOException {
        if (javaHome.isPresent()) {
            spec.setExecutable(javaHome.get() + "/bin/java");
        }
-       spec.classpath(getForbiddenAPIsClasspath(), classpath);
+       spec.classpath(getForbiddenAPIsClasspath(), getThirdPartyClasspath());
        // Enable explicitly for each release as appropriate. Just JDK 20/21/22/23 for now, and just the vector module.
if (isJavaVersion(VERSION_20) || isJavaVersion(VERSION_21) || isJavaVersion(VERSION_22) || isJavaVersion(VERSION_23)) {
            spec.jvmArgs("--add-modules", "jdk.incubator.vector");
@@ -383,7 +378,7 @@ private boolean isJavaVersion(JavaVersion version) {
    private Set<String> runJdkJarHellCheck() throws IOException {
        ByteArrayOutputStream standardOut = new ByteArrayOutputStream();
        ExecResult execResult = execOperations.javaexec(spec -> {
-           spec.classpath(getJdkJarHellClasspath(), classpath);
+           spec.classpath(getJdkJarHellClasspath(), getThirdPartyClasspath());
            spec.getMainClass().set(JDK_JAR_HELL_MAIN_CLASS);
            spec.args(getJarExpandDir());
            spec.setIgnoreExitValue(true);
@@ -402,8 +397,4 @@ private Set<String> runJdkJarHellCheck() throws IOException {
        return new TreeSet<>(Arrays.asList(jdkJarHellCheckList.split("\\r?\\n")));
    }
-   public void setClasspath(FileCollection classpath) {
-       this.classpath = classpath;
-   }
-
}
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/util/DependenciesUtils.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/util/DependenciesUtils.java
index 9080f62f19937..5d7386e2c2150 100644
--- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/util/DependenciesUtils.java
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/util/DependenciesUtils.java
@@ -9,12 +9,16 @@
package org.elasticsearch.gradle.internal.util;
+import com.github.jengelman.gradle.plugins.shadow.ShadowBasePlugin;
+
import org.gradle.api.artifacts.Configuration;
import org.gradle.api.artifacts.ResolvableDependencies;
import org.gradle.api.artifacts.component.ComponentIdentifier;
+import org.gradle.api.artifacts.component.ProjectComponentIdentifier;
import org.gradle.api.artifacts.result.ResolvedComponentResult;
import org.gradle.api.artifacts.result.ResolvedDependencyResult;
import org.gradle.api.file.FileCollection;
+import org.gradle.api.provider.Provider;
import org.gradle.api.specs.AndSpec;
import org.gradle.api.specs.Spec;
@@ -29,7 +33,7 @@ public static FileCollection createFileCollectionFromNonTransitiveArtifactsView(
    ) {
        ResolvableDependencies incoming = configuration.getIncoming();
        return incoming.artifactView(viewConfiguration -> {
-           Set<ComponentIdentifier> firstLevelDependencyComponents = incoming.getResolutionResult()
+           Provider<Set<ComponentIdentifier>> firstLevelDependencyComponents = incoming.getResolutionResult()
                .getRootComponent()
                .map(
                    rootComponent -> rootComponent.getDependencies()
                        .stream()
                        .filter(dependency -> dependency.getSelected() instanceof ResolvedComponentResult)
                        .map(dependency -> dependency.getSelected().getId())
                        .collect(Collectors.toSet())
-               )
-               .get();
+               );
            viewConfiguration.componentFilter(
-               new AndSpec<>(identifier -> firstLevelDependencyComponents.contains(identifier), componentFilter)
+               new AndSpec<>(identifier -> firstLevelDependencyComponents.get().contains(identifier), componentFilter)
            );
        }).getFiles();
    }
+   /**
+    * This method gives us an artifact view of a configuration that filters out all
+    * project dependencies that are not shadowed jars.
+    * Basically a third-party-only view of the dependency tree.
+    */
+   public static FileCollection thirdPartyDependenciesView(Configuration configuration) {
+       ResolvableDependencies incoming = configuration.getIncoming();
+       return incoming.artifactView(v -> {
+           // resolve componentIdentifier for all shadowed project dependencies
+           Provider<Set<ComponentIdentifier>> shadowedDependencies = incoming.getResolutionResult()
+               .getRootComponent()
+               .map(
+                   root -> root.getDependencies()
+                       .stream()
+                       .filter(dep -> dep instanceof ResolvedDependencyResult)
+                       .map(dep -> (ResolvedDependencyResult) dep)
+                       .filter(dep -> dep.getResolvedVariant().getDisplayName().equals(ShadowBasePlugin.COMPONENT_NAME))
+                       .filter(dep -> dep.getSelected() instanceof ResolvedComponentResult)
+                       .map(dep -> dep.getSelected().getId())
+                       .collect(Collectors.toSet())
+               );
+           // filter out project dependencies if they are not a shadowed dependency
+           v.componentFilter(i -> (i instanceof ProjectComponentIdentifier == false || shadowedDependencies.get().contains(i)));
+       }).getFiles();
+   }
}
diff --git a/build-tools-internal/src/main/resources/changelog-schema.json b/build-tools-internal/src/main/resources/changelog-schema.json
index 451701d74d690..9692af7adc5e6 100644
--- a/build-tools-internal/src/main/resources/changelog-schema.json
+++ b/build-tools-internal/src/main/resources/changelog-schema.json
@@ -279,6 +279,7 @@
      "compatibilityChangeArea": {
        "type": "string",
        "enum": [
+         "Aggregations",
          "Analysis",
          "Authorization",
          "Cluster and node setting",
@@ -295,6 +296,7 @@
          "Painless",
          "REST API",
          "Rollup",
+         "Search",
          "System requirement",
          "Transform"
        ]
diff --git a/docs/build.gradle b/docs/build.gradle
index dec0de8ffa844..93b7277327280 100644
--- a/docs/build.gradle
+++ b/docs/build.gradle
@@ -130,8 +130,9 @@ testClusters.matching { it.name == "yamlRestTest"}.configureEach {
  setting 'xpack.security.enabled', 'true'
  setting 'xpack.security.authc.api_key.enabled', 'true'
  setting 'xpack.security.authc.token.enabled', 'true'
- // disable the ILM history for doc tests to avoid potential lingering tasks that'd cause test flakiness
+ // disable the ILM and SLM history for doc tests to avoid potential lingering tasks that'd cause test flakiness
  setting 'indices.lifecycle.history_index_enabled', 'false'
+ setting 'slm.history_index_enabled', 'false'
  setting 'xpack.license.self_generated.type', 'trial'
  setting 'xpack.security.authc.realms.file.file.order', '0'
  setting 'xpack.security.authc.realms.native.native.order', '1'
diff --git a/docs/changelog/116358.yaml b/docs/changelog/116358.yaml
deleted file mode 100644
index 58b44a1e9bcf5..0000000000000
--- a/docs/changelog/116358.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 116358
-summary: Update Deberta tokenizer
-area: Machine Learning
-type: bug
-issues: []
diff --git a/docs/changelog/116388.yaml b/docs/changelog/116388.yaml
new file mode 100644
index 0000000000000..59cdafc9ec337
--- /dev/null
+++ b/docs/changelog/116388.yaml
@@ -0,0 +1,5 @@
+pr: 116388
+summary: Add support for partial shard results
+area: EQL
+type: enhancement
+issues: []
diff --git a/docs/changelog/116423.yaml b/docs/changelog/116423.yaml
deleted file mode 100644
index d6d10eab410e4..0000000000000
--- a/docs/changelog/116423.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 116423
-summary: Support mTLS for the Elastic Inference Service integration inside the inference API
-area: Machine Learning
-type: feature
-issues: []
diff --git a/docs/changelog/117153.yaml b/docs/changelog/117153.yaml
deleted file mode 100644
index f7640c0a7ed6a..0000000000000
--- a/docs/changelog/117153.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 117153
-summary: "ESQL: fix the column position in errors"
-area: ES|QL
-type: bug
-issues: []
diff --git a/docs/changelog/117778.yaml b/docs/changelog/117778.yaml
new file mode 100644
index 0000000000000..880d4f831e533
--- /dev/null
+++ b/docs/changelog/117778.yaml
@@ -0,0 +1,5 @@
+pr: 117778
+summary: "[Connector APIs] Enforce index prefix for managed connectors"
+area: Extract&Transform
+type: feature
+issues: []
diff --git a/docs/changelog/117989.yaml b/docs/changelog/117989.yaml
new file mode 100644
index 0000000000000..e4967141b3ebd
--- /dev/null
+++ b/docs/changelog/117989.yaml
@@ -0,0 +1,5 @@
+pr: 117989
+summary: ESQL Add esql hash function
+area: ES|QL
+type: enhancement
+issues: []
diff --git a/docs/changelog/118266.yaml b/docs/changelog/118266.yaml
new file mode 100644
index 0000000000000..1b14b12b973c5
--- /dev/null
+++ b/docs/changelog/118266.yaml
@@ -0,0 +1,5 @@
+pr: 118266
+summary: Prevent data nodes from sending stack traces to coordinator when `error_trace=false`
+area: Search
+type: enhancement
+issues: []
diff --git a/docs/changelog/118366.yaml b/docs/changelog/118366.yaml
new file mode 100644
index 0000000000000..cfeab1937738b
--- /dev/null
+++ b/docs/changelog/118366.yaml
@@ -0,0 +1,22 @@
+pr: 118366
+summary: |-
+  Configuring a bind DN in an LDAP or Active Directory (AD) realm without a corresponding bind password
+  will prevent the node from starting
+area: Authentication
+type: breaking
+issues: []
+breaking:
+  title: |-
+    Configuring a bind DN in an LDAP or Active Directory (AD) realm without
+    a corresponding bind password will prevent the node from starting
+  area: Cluster and node setting
+  details: |-
+    For LDAP or AD authentication realms, setting a bind DN (via the
+    `xpack.security.authc.realms.ldap.*.bind_dn` or `xpack.security.authc.realms.active_directory.*.bind_dn`
+    realm settings) without a bind password is a misconfiguration that may prevent successful authentication
+    to the node. Nodes will fail to start if a bind DN is specified without a password.
+  impact: |-
+    If you have a bind DN configured for an LDAP or AD authentication
+    realm, set a bind password for {ref}/ldap-realm.html#ldap-realm-configuration[LDAP]
+    or {ref}/active-directory-realm.html#ad-realm-configuration[Active Directory].
+    Configuring a bind DN without a password prevents the misconfigured node from starting.
diff --git a/docs/changelog/118380.yaml b/docs/changelog/118380.yaml
deleted file mode 100644
index 8b26c871fb172..0000000000000
--- a/docs/changelog/118380.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 118380
-summary: Restore original "is within leaf" value in `SparseVectorFieldMapper`
-area: Mapping
-type: bug
-issues: []
diff --git a/docs/changelog/118484.yaml b/docs/changelog/118484.yaml
new file mode 100644
index 0000000000000..41db476a42523
--- /dev/null
+++ b/docs/changelog/118484.yaml
@@ -0,0 +1,14 @@
+pr: 118484
+summary: Remove date histogram boolean support
+area: Aggregations
+type: breaking
+issues: []
+breaking:
+  title: Remove date histogram boolean support
+  area: Aggregations
+  details: Elasticsearch no longer allows running Date Histogram aggregations
+    over boolean fields. Instead, use Terms aggregation for boolean
+    fields.
+  impact: We expect the impact to be minimal, as this never produced good
+    results, and has been deprecated for years.
+ notable: false diff --git a/docs/changelog/118544.yaml b/docs/changelog/118544.yaml new file mode 100644 index 0000000000000..d59783c4e6194 --- /dev/null +++ b/docs/changelog/118544.yaml @@ -0,0 +1,5 @@ +pr: 118544 +summary: ESQL - Remove restrictions for disjunctions in full text functions +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/118617.yaml b/docs/changelog/118617.yaml new file mode 100644 index 0000000000000..a8793a114e913 --- /dev/null +++ b/docs/changelog/118617.yaml @@ -0,0 +1,5 @@ +pr: 118617 +summary: Add support for `sparse_vector` queries against `semantic_text` fields +area: "Search" +type: enhancement +issues: [] diff --git a/docs/changelog/118671.yaml b/docs/changelog/118671.yaml new file mode 100644 index 0000000000000..3931cc4179037 --- /dev/null +++ b/docs/changelog/118671.yaml @@ -0,0 +1,11 @@ +pr: 118671 +summary: Adjust `random_score` default field to `_seq_no` field +area: Search +type: breaking +issues: [] +breaking: + title: Adjust `random_score` default field to `_seq_no` field + area: Search + details: When providing a 'seed' parameter to a 'random_score' function in the 'function_score' query but NOT providing a 'field', the default 'field' is switched from '_id' to '_seq_no'. + impact: The random scoring and ordering may change when providing a 'seed' and not providing a 'field' to a 'random_score' function. + notable: false diff --git a/docs/changelog/118674.yaml b/docs/changelog/118674.yaml new file mode 100644 index 0000000000000..eeb90a3b38f66 --- /dev/null +++ b/docs/changelog/118674.yaml @@ -0,0 +1,5 @@ +pr: 118674 +summary: Ignore failures from renormalizing buckets in read-only index +area: Machine Learning +type: enhancement +issues: [] diff --git a/docs/changelog/118681.yaml b/docs/changelog/118681.yaml new file mode 100644 index 0000000000000..a186c05e6cd7d --- /dev/null +++ b/docs/changelog/118681.yaml @@ -0,0 +1,6 @@ +pr: 118681 +summary: '`ConnectTransportException` returns retryable BAD_GATEWAY' +area: Network +type: enhancement +issues: + - 118320 diff --git a/docs/changelog/118697.yaml b/docs/changelog/118697.yaml new file mode 100644 index 0000000000000..6e24e6ae4b47f --- /dev/null +++ b/docs/changelog/118697.yaml @@ -0,0 +1,6 @@ +pr: 118697 +summary: Esql implicit casting for date nanos +area: ES|QL +type: enhancement +issues: + - 118476 diff --git a/docs/changelog/118757.yaml b/docs/changelog/118757.yaml new file mode 100644 index 0000000000000..956e220f21aeb --- /dev/null +++ b/docs/changelog/118757.yaml @@ -0,0 +1,5 @@ +pr: 118757 +summary: Improve handling of nested fields in index reader wrappers +area: Authorization +type: enhancement +issues: [] diff --git a/docs/changelog/118816.yaml b/docs/changelog/118816.yaml new file mode 100644 index 0000000000000..f1c1eac90dbcf --- /dev/null +++ b/docs/changelog/118816.yaml @@ -0,0 +1,6 @@ +pr: 118816 +summary: Support flattened field with downsampling +area: Downsampling +type: bug +issues: + - 116319 diff --git a/docs/changelog/118837.yaml b/docs/changelog/118837.yaml new file mode 100644 index 0000000000000..38cd32f3a3513 --- /dev/null +++ b/docs/changelog/118837.yaml @@ -0,0 +1,5 @@ +pr: 118837 +summary: Add missing timeouts to rest-api-spec ILM APIs +area: "ILM+SLM" +type: bug +issues: [] diff --git a/docs/changelog/118844.yaml b/docs/changelog/118844.yaml new file mode 100644 index 0000000000000..f9f92bcaeb8cb --- /dev/null +++ b/docs/changelog/118844.yaml @@ -0,0 +1,5 @@ +pr: 118844 +summary: Add missing timeouts to rest-api-spec ingest APIs +area: Ingest 
Node +type: bug +issues: [] diff --git a/docs/changelog/118858.yaml b/docs/changelog/118858.yaml new file mode 100644 index 0000000000000..a2161df1c84c7 --- /dev/null +++ b/docs/changelog/118858.yaml @@ -0,0 +1,5 @@ +pr: 118858 +summary: Lookup join on multiple join fields not yet supported +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/118890.yaml b/docs/changelog/118890.yaml new file mode 100644 index 0000000000000..d3fc17157f130 --- /dev/null +++ b/docs/changelog/118890.yaml @@ -0,0 +1,5 @@ +pr: 118890 +summary: Add action to create index from a source index +area: Data streams +type: enhancement +issues: [] diff --git a/docs/reference/alias.asciidoc b/docs/reference/alias.asciidoc index 9d784f530d63c..f676644c4ec48 100644 --- a/docs/reference/alias.asciidoc +++ b/docs/reference/alias.asciidoc @@ -407,3 +407,24 @@ POST _aliases } ---- // TEST[s/^/PUT my-index-2099.05.06-000001\n/] + +[discrete] +[[remove-index]] +=== Remove an index + +To remove an index, use the aliases API's `remove_index` action. + +[source,console] +---- +POST _aliases +{ + "actions": [ + { + "remove_index": { + "index": "my-index-2099.05.06-000001" + } + } + ] +} +---- +// TEST[s/^/PUT my-index-2099.05.06-000001\n/] diff --git a/docs/reference/connector/docs/connectors-content-extraction.asciidoc b/docs/reference/connector/docs/connectors-content-extraction.asciidoc index a87d38c9bf531..744fe1d87cb45 100644 --- a/docs/reference/connector/docs/connectors-content-extraction.asciidoc +++ b/docs/reference/connector/docs/connectors-content-extraction.asciidoc @@ -8,7 +8,7 @@ The logic for content extraction is defined in {connectors-python}/connectors/ut While intended primarily for PDF and Microsoft Office formats, you can use any of the <>. Enterprise Search uses an {ref}/ingest.html[Elasticsearch ingest pipeline^] to power the web crawler's binary content extraction. -The default pipeline, `ent-search-generic-ingestion`, is automatically created when Enterprise Search first starts. +The default pipeline, `search-default-ingestion`, is automatically created when Enterprise Search first starts. You can {ref}/ingest.html#create-manage-ingest-pipelines[view^] this pipeline in Kibana. Customizing your pipeline usage is also an option. diff --git a/docs/reference/connector/docs/connectors-filter-extract-transform.asciidoc b/docs/reference/connector/docs/connectors-filter-extract-transform.asciidoc index 278478c908bf0..62a99928bfb46 100644 --- a/docs/reference/connector/docs/connectors-filter-extract-transform.asciidoc +++ b/docs/reference/connector/docs/connectors-filter-extract-transform.asciidoc @@ -13,7 +13,7 @@ The following diagram provides an overview of how content extraction, sync rules [.screenshot] image::images/pipelines-extraction-sync-rules.png[Architecture diagram of data pipeline with content extraction, sync rules, and ingest pipelines] -By default, only the connector specific logic (2) and the default `ent-search-generic-ingestion` pipeline (6) extract and transform your data, as configured in your deployment. +By default, only the connector specific logic (2) and the default `search-default-ingestion` pipeline (6) extract and transform your data, as configured in your deployment. The following tools are available for more advanced use cases: @@ -50,4 +50,4 @@ Use ingest pipelines for data enrichment, normalization, and more. Elastic connectors use a default ingest pipeline, which you can copy and customize to meet your needs. 
-Refer to {ref}/ingest-pipeline-search.html[ingest pipelines in Search] in the {es} documentation.
\ No newline at end of file
+Refer to {ref}/ingest-pipeline-search.html[ingest pipelines in Search] in the {es} documentation.
diff --git a/docs/reference/eql/eql-search-api.asciidoc b/docs/reference/eql/eql-search-api.asciidoc
index d7f10f4627f6c..0fd490609277f 100644
--- a/docs/reference/eql/eql-search-api.asciidoc
+++ b/docs/reference/eql/eql-search-api.asciidoc
@@ -88,6 +88,53 @@ request that targets only `bar*` still returns an error.
+
Defaults to `true`.
+`allow_partial_search_results`::
+(Optional, Boolean)
+
+If `false`, the request returns an error if one or more shards involved in the query are unavailable.
++
+If `true`, the query is executed only on the available shards, ignoring shard request timeouts and
+<>.
++
+Defaults to `false`.
++
+To override the default for this field, set the
+`xpack.eql.default_allow_partial_results` cluster setting to `true`.
+
+
+[IMPORTANT]
+====
+You can also specify this value using the `allow_partial_search_results` request body parameter.
+If both parameters are specified, only the query parameter is used.
+====
+
+
+`allow_partial_sequence_results`::
+(Optional, Boolean)
+
+
+Used together with `allow_partial_search_results=true`, this parameter controls the behavior of sequence queries specifically
+(if `allow_partial_search_results=false`, this setting has no effect).
+If `true` and if some shards are unavailable, the sequences are calculated on available shards only.
++
+If `false` and if some shards are unavailable, the query only returns information about the shard failures,
+but no further results.
++
+Defaults to `false`.
++
+Consider that sequences calculated with `allow_partial_search_results=true` can return incorrect results
+(e.g. if a <> clause matches records in unavailable shards).
++
+To override the default for this field, set the
+`xpack.eql.default_allow_partial_sequence_results` cluster setting to `true`.
+
+
+[IMPORTANT]
+====
+You can also specify this value using the `allow_partial_sequence_results` request body parameter.
+If both parameters are specified, only the query parameter is used.
+====
+
`ccs_minimize_roundtrips`::
(Optional, Boolean)
If `true`, network round-trips between the local and the remote cluster are minimized when running cross-cluster search (CCS) requests.
diff --git a/docs/reference/esql/functions/description/hash.asciidoc b/docs/reference/esql/functions/description/hash.asciidoc
new file mode 100644
index 0000000000000..e074915c5132a
--- /dev/null
+++ b/docs/reference/esql/functions/description/hash.asciidoc
@@ -0,0 +1,5 @@
+// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.
+
+*Description*
+
+Computes the hash of the input using various algorithms such as MD5, SHA, SHA-224, SHA-256, SHA-384, SHA-512.
diff --git a/docs/reference/esql/functions/examples/bucket.asciidoc b/docs/reference/esql/functions/examples/bucket.asciidoc
index 4afea30660339..264efc191748f 100644
--- a/docs/reference/esql/functions/examples/bucket.asciidoc
+++ b/docs/reference/esql/functions/examples/bucket.asciidoc
@@ -116,4 +116,18 @@ include::{esql-specs}/bucket.csv-spec[tag=reuseGroupingFunctionWithExpression]
|===
include::{esql-specs}/bucket.csv-spec[tag=reuseGroupingFunctionWithExpression-result]
|===
+Sometimes you need to change the start value of each bucket by a given duration (similar to date histogram
To do so, you will need to +take into account how the language handles expressions within the `STATS` command: if these contain functions or +arithmetic operators, a virtual `EVAL` is inserted before and/or after the `STATS` command. Consequently, a double +compensation is needed to adjust the bucketed date value before the aggregation and then again after. For instance, +inserting a negative offset of `1 hour` to buckets of `1 year` looks like this: +[source.merge.styled,esql] +---- +include::{esql-specs}/bucket.csv-spec[tag=bucketWithOffset] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/bucket.csv-spec[tag=bucketWithOffset-result] +|=== diff --git a/docs/reference/esql/functions/kibana/definition/bucket.json b/docs/reference/esql/functions/kibana/definition/bucket.json index 18802f5ff8fef..3d96de05c8407 100644 --- a/docs/reference/esql/functions/kibana/definition/bucket.json +++ b/docs/reference/esql/functions/kibana/definition/bucket.json @@ -1598,7 +1598,8 @@ "FROM employees\n| WHERE hire_date >= \"1985-01-01T00:00:00Z\" AND hire_date < \"1986-01-01T00:00:00Z\"\n| STATS c = COUNT(1) BY b = BUCKET(salary, 5000.)\n| SORT b", "FROM sample_data \n| WHERE @timestamp >= NOW() - 1 day and @timestamp < NOW()\n| STATS COUNT(*) BY bucket = BUCKET(@timestamp, 25, NOW() - 1 day, NOW())", "FROM employees\n| WHERE hire_date >= \"1985-01-01T00:00:00Z\" AND hire_date < \"1986-01-01T00:00:00Z\"\n| STATS AVG(salary) BY bucket = BUCKET(hire_date, 20, \"1985-01-01T00:00:00Z\", \"1986-01-01T00:00:00Z\")\n| SORT bucket", - "FROM employees\n| STATS s1 = b1 + 1, s2 = BUCKET(salary / 1000 + 999, 50.) + 2 BY b1 = BUCKET(salary / 100 + 99, 50.), b2 = BUCKET(salary / 1000 + 999, 50.)\n| SORT b1, b2\n| KEEP s1, b1, s2, b2" + "FROM employees\n| STATS s1 = b1 + 1, s2 = BUCKET(salary / 1000 + 999, 50.) + 2 BY b1 = BUCKET(salary / 100 + 99, 50.), b2 = BUCKET(salary / 1000 + 999, 50.)\n| SORT b1, b2\n| KEEP s1, b1, s2, b2", + "FROM employees \n| STATS dates = VALUES(birth_date) BY b = BUCKET(birth_date + 1 HOUR, 1 YEAR) - 1 HOUR\n| EVAL d_count = MV_COUNT(dates)\n| SORT d_count\n| LIMIT 3" ], "preview" : false, "snapshot_only" : false diff --git a/docs/reference/esql/functions/kibana/definition/hash.json b/docs/reference/esql/functions/kibana/definition/hash.json new file mode 100644 index 0000000000000..17a60cf45acfe --- /dev/null +++ b/docs/reference/esql/functions/kibana/definition/hash.json @@ -0,0 +1,82 @@ +{ + "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.", + "type" : "eval", + "name" : "hash", + "description" : "Computes the hash of the input using various algorithms such as MD5, SHA, SHA-224, SHA-256, SHA-384, SHA-512.", + "signatures" : [ + { + "params" : [ + { + "name" : "algorithm", + "type" : "keyword", + "optional" : false, + "description" : "Hash algorithm to use." + }, + { + "name" : "input", + "type" : "keyword", + "optional" : false, + "description" : "Input to hash." + } + ], + "variadic" : false, + "returnType" : "keyword" + }, + { + "params" : [ + { + "name" : "algorithm", + "type" : "keyword", + "optional" : false, + "description" : "Hash algorithm to use." + }, + { + "name" : "input", + "type" : "text", + "optional" : false, + "description" : "Input to hash." + } + ], + "variadic" : false, + "returnType" : "keyword" + }, + { + "params" : [ + { + "name" : "algorithm", + "type" : "text", + "optional" : false, + "description" : "Hash algorithm to use." 
+ }, + { + "name" : "input", + "type" : "keyword", + "optional" : false, + "description" : "Input to hash." + } + ], + "variadic" : false, + "returnType" : "keyword" + }, + { + "params" : [ + { + "name" : "algorithm", + "type" : "text", + "optional" : false, + "description" : "Hash algorithm to use." + }, + { + "name" : "input", + "type" : "text", + "optional" : false, + "description" : "Input to hash." + } + ], + "variadic" : false, + "returnType" : "keyword" + } + ], + "preview" : false, + "snapshot_only" : false +} diff --git a/docs/reference/esql/functions/kibana/docs/hash.md b/docs/reference/esql/functions/kibana/docs/hash.md new file mode 100644 index 0000000000000..9826e80ec5bec --- /dev/null +++ b/docs/reference/esql/functions/kibana/docs/hash.md @@ -0,0 +1,7 @@ + + +### HASH +Computes the hash of the input using various algorithms such as MD5, SHA, SHA-224, SHA-256, SHA-384, SHA-512. + diff --git a/docs/reference/esql/functions/layout/hash.asciidoc b/docs/reference/esql/functions/layout/hash.asciidoc new file mode 100644 index 0000000000000..27c55ada6319b --- /dev/null +++ b/docs/reference/esql/functions/layout/hash.asciidoc @@ -0,0 +1,14 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +[discrete] +[[esql-hash]] +=== `HASH` + +*Syntax* + +[.text-center] +image::esql/functions/signature/hash.svg[Embedded,opts=inline] + +include::../parameters/hash.asciidoc[] +include::../description/hash.asciidoc[] +include::../types/hash.asciidoc[] diff --git a/docs/reference/esql/functions/parameters/hash.asciidoc b/docs/reference/esql/functions/parameters/hash.asciidoc new file mode 100644 index 0000000000000..d47a82d4ab214 --- /dev/null +++ b/docs/reference/esql/functions/parameters/hash.asciidoc @@ -0,0 +1,9 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Parameters* + +`algorithm`:: +Hash algorithm to use. + +`input`:: +Input to hash. diff --git a/docs/reference/esql/functions/signature/hash.svg b/docs/reference/esql/functions/signature/hash.svg new file mode 100644 index 0000000000000..f819e14c9d1a4 --- /dev/null +++ b/docs/reference/esql/functions/signature/hash.svg @@ -0,0 +1 @@ +HASH(algorithm,input) \ No newline at end of file diff --git a/docs/reference/esql/functions/string-functions.asciidoc b/docs/reference/esql/functions/string-functions.asciidoc index ce9636f5c5a3a..da9580a55151a 100644 --- a/docs/reference/esql/functions/string-functions.asciidoc +++ b/docs/reference/esql/functions/string-functions.asciidoc @@ -13,6 +13,7 @@ * <> * <> * <> +* <> * <> * <> * <> @@ -37,6 +38,7 @@ include::layout/byte_length.asciidoc[] include::layout/concat.asciidoc[] include::layout/ends_with.asciidoc[] include::layout/from_base64.asciidoc[] +include::layout/hash.asciidoc[] include::layout/left.asciidoc[] include::layout/length.asciidoc[] include::layout/locate.asciidoc[] diff --git a/docs/reference/esql/functions/types/hash.asciidoc b/docs/reference/esql/functions/types/hash.asciidoc new file mode 100644 index 0000000000000..786ba03b2aa60 --- /dev/null +++ b/docs/reference/esql/functions/types/hash.asciidoc @@ -0,0 +1,12 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. 
+
+*Supported types*
+
+[%header.monospaced.styled,format=dsv,separator=|]
+|===
+algorithm | input | result
+keyword | keyword | keyword
+keyword | text | keyword
+text | keyword | keyword
+text | text | keyword
+|===
diff --git a/docs/reference/ilm/example-index-lifecycle-policy.asciidoc b/docs/reference/ilm/example-index-lifecycle-policy.asciidoc
index 6ec261fabc448..0b3c17fb2caae 100644
--- a/docs/reference/ilm/example-index-lifecycle-policy.asciidoc
+++ b/docs/reference/ilm/example-index-lifecycle-policy.asciidoc
@@ -24,7 +24,7 @@ and retention requirements.
You want to send log files to an {es} cluster so you can visualize and analyze
the data. This data has the following retention requirements:
-* When the write index reaches 50GB or is 30 days old, roll over to a new index.
+* When the primary shard size of the write index reaches 50GB or the index is 30 days old, roll over to a new index.
* After rollover, keep indices in the hot data tier for 30 days.
* 30 days after rollover:
** Move indices to the warm data tier.
@@ -84,7 +84,7 @@ To save the `logs@lifecycle` policy as a new policy in {kib}:
. On the **Edit policy logs** page, toggle **Save as new policy**, and then provide a new name for the policy, for example, `logs-custom`.
The `logs@lifecycle` policy uses the recommended rollover defaults: Start writing to a new
-index when the current write index reaches 50GB or becomes 30 days old.
+index when the primary shard size of the current write index reaches 50GB or the index becomes 30 days old.
To view or change the rollover settings, click **Advanced settings** for the hot
phase. Then disable **Use recommended defaults** to display the rollover
diff --git a/docs/reference/images/ilm/tutorial-ilm-hotphaserollover-default.png b/docs/reference/images/ilm/tutorial-ilm-hotphaserollover-default.png
index 14ff66e410835..d7f314cedb261 100644
Binary files a/docs/reference/images/ilm/tutorial-ilm-hotphaserollover-default.png and b/docs/reference/images/ilm/tutorial-ilm-hotphaserollover-default.png differ
diff --git a/docs/reference/indices/shard-stores.asciidoc b/docs/reference/indices/shard-stores.asciidoc
index 1b001a3175b8c..04b086a758f9d 100644
--- a/docs/reference/indices/shard-stores.asciidoc
+++ b/docs/reference/indices/shard-stores.asciidoc
@@ -172,8 +172,9 @@ The API returns the following response:
        "attributes": {},
        "roles": [...],
        "version": "8.10.0",
-       "min_index_version": 7000099,
-       "max_index_version": 8100099
+       "min_index_version": 8000099,
+       "min_read_only_index_version": 7000099,
+       "max_index_version": 9004000
      },
      "allocation_id": "2iNySv_OQVePRX-yaRH_lQ", <4>
      "allocation" : "primary|replica|unused" <5>
@@ -193,6 +194,7 @@ The API returns the following response:
// TESTRESPONSE[s/"roles": \[[^]]*\]/"roles": $body.$_path/]
// TESTRESPONSE[s/"8.10.0"/\$node_version/]
// TESTRESPONSE[s/"min_index_version": 7000099/"min_index_version": $body.$_path/]
+// TESTRESPONSE[s/"min_read_only_index_version": 7000099/"min_read_only_index_version": $body.$_path/]
// TESTRESPONSE[s/"max_index_version": 8100099/"max_index_version": $body.$_path/]
diff --git a/docs/reference/ingest/search-inference-processing.asciidoc b/docs/reference/ingest/search-inference-processing.asciidoc
index 006cc96294477..73642b3bb3447 100644
--- a/docs/reference/ingest/search-inference-processing.asciidoc
+++ b/docs/reference/ingest/search-inference-processing.asciidoc
@@ -88,7 +88,7 @@ The `monitor_ml` <> is req
To create the index-specific ML inference pipeline, go to *Search -> Content -> Indices -> <your index> -> Pipelines* in the Kibana
UI. -If you only see the `ent-search-generic-ingestion` pipeline, you will need to click *Copy and customize* to create index-specific pipelines. +If you only see the `search-default-ingestion` pipeline, you will need to click *Copy and customize* to create index-specific pipelines. This will create the `{index_name}@ml-inference` pipeline. Once your index-specific ML inference pipeline is ready, you can add inference processors that use your ML trained models. diff --git a/docs/reference/ingest/search-ingest-pipelines.asciidoc b/docs/reference/ingest/search-ingest-pipelines.asciidoc index e414dacaab964..272c6ba2884b9 100644 --- a/docs/reference/ingest/search-ingest-pipelines.asciidoc +++ b/docs/reference/ingest/search-ingest-pipelines.asciidoc @@ -40,7 +40,7 @@ Considerations such as error handling, conditional execution, sequencing, versio To this end, when you create indices for search use cases, (including {enterprise-search-ref}/crawler.html[Elastic web crawler], <>. , and API indices), each index already has a pipeline set up with several processors that optimize your content for search. -This pipeline is called `ent-search-generic-ingestion`. +This pipeline is called `search-default-ingestion`. While it is a "managed" pipeline (meaning it should not be tampered with), you can view its details via the Kibana UI or the Elasticsearch API. You can also <>. @@ -56,14 +56,14 @@ This will not effect existing indices. Each index also provides the capability to easily create index-specific ingest pipelines with customizable processing. If you need that extra flexibility, you can create a custom pipeline by going to your pipeline settings and choosing to "copy and customize". -This will replace the index's use of `ent-search-generic-ingestion` with 3 newly generated pipelines: +This will replace the index's use of `search-default-ingestion` with 3 newly generated pipelines: 1. `` 2. `@custom` 3. `@ml-inference` -Like `ent-search-generic-ingestion`, the first of these is "managed", but the other two can and should be modified to fit your needs. -You can view these pipelines using the platform tools (Kibana UI, Elasticsearch API), and can also +Like `search-default-ingestion`, the first of these is "managed", but the other two can and should be modified to fit your needs. +You can view these pipelines using the platform tools (Kibana UI, Elasticsearch API), and can also <>. [discrete#ingest-pipeline-search-pipeline-settings] @@ -123,7 +123,7 @@ If the pipeline is not specified, the underscore-prefixed fields will actually b === Details [discrete#ingest-pipeline-search-details-generic-reference] -==== `ent-search-generic-ingestion` Reference +==== `search-default-ingestion` Reference You can access this pipeline with the <> or via Kibana's < Ingest Pipelines>> UI. @@ -149,7 +149,7 @@ If you want to make customizations, we recommend you utilize index-specific pipe [discrete#ingest-pipeline-search-details-generic-reference-params] ===== Control flow parameters -The `ent-search-generic-ingestion` pipeline does not always run all processors. +The `search-default-ingestion` pipeline does not always run all processors. It utilizes a feature of ingest pipelines to <> based on the contents of each individual document. * `_extract_binary_content` - if this field is present and has a value of `true` on a source document, the pipeline will attempt to run the `attachment`, `set_body`, and `remove_replacement_chars` processors. @@ -167,8 +167,8 @@ See <>. 
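For a quick look at what the managed pipeline contains, you can fetch it with the get pipeline API. This is a minimal illustrative call (an editorial sketch, not part of the upstream change); it assumes the pipeline exists under the new `search-default-ingestion` name described above:

[source,console]
----
GET _ingest/pipeline/search-default-ingestion
----
// TEST[skip:illustrative example only]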
==== Index-specific ingest pipelines In the Kibana UI for your index, by clicking on the Pipelines tab, then *Settings > Copy and customize*, you can quickly generate 3 pipelines which are specific to your index. -These 3 pipelines replace `ent-search-generic-ingestion` for the index. -There is nothing lost in this action, as the `` pipeline is a superset of functionality over the `ent-search-generic-ingestion` pipeline. +These 3 pipelines replace `search-default-ingestion` for the index. +There is nothing lost in this action, as the `` pipeline is a superset of functionality over the `search-default-ingestion` pipeline. [IMPORTANT] ==== @@ -179,7 +179,7 @@ Refer to the Elastic subscriptions pages for https://www.elastic.co/subscription [discrete#ingest-pipeline-search-details-specific-reference] ===== `` Reference -This pipeline looks and behaves a lot like the <>, but with <>. +This pipeline looks and behaves a lot like the <>, but with <>. [WARNING] ========================= @@ -197,7 +197,7 @@ If you want to make customizations, we recommend you utilize <>, the index-specific pipeline also defines: +In addition to the processors inherited from the <>, the index-specific pipeline also defines: * `index_ml_inference_pipeline` - this uses the <> processor to run the `@ml-inference` pipeline. This processor will only be run if the source document includes a `_run_ml_inference` field with the value `true`. @@ -206,7 +206,7 @@ In addition to the processors inherited from the <` pipeline does not always run all processors. +Like the `search-default-ingestion` pipeline, the `` pipeline does not always run all processors. In addition to the `_extract_binary_content` and `_reduce_whitespace` control flow parameters, the `` pipeline also supports: * `_run_ml_inference` - if this field is present and has a value of `true` on a source document, the pipeline will attempt to run the `index_ml_inference_pipeline` processor. @@ -220,7 +220,7 @@ See <>. ===== `@ml-inference` Reference This pipeline is empty to start (no processors), but can be added to via the Kibana UI either through the Pipelines tab of your index, or from the *Stack Management > Ingest Pipelines* page. -Unlike the `ent-search-generic-ingestion` pipeline and the `` pipeline, this pipeline is NOT "managed". +Unlike the `search-default-ingestion` pipeline and the `` pipeline, this pipeline is NOT "managed". It's possible to add one or more ML inference pipelines to an index in the *Content* UI. This pipeline will serve as a container for all of the ML inference pipelines configured for the index. @@ -241,7 +241,7 @@ The `monitor_ml` Elasticsearch cluster permission is required in order to manage This pipeline is empty to start (no processors), but can be added to via the Kibana UI either through the Pipelines tab of your index, or from the *Stack Management > Ingest Pipelines* page. -Unlike the `ent-search-generic-ingestion` pipeline and the `` pipeline, this pipeline is NOT "managed". +Unlike the `search-default-ingestion` pipeline and the `` pipeline, this pipeline is NOT "managed". You are encouraged to make additions and edits to this pipeline, provided its name remains the same. This provides a convenient hook from which to add custom processing and transformations for your data. @@ -272,9 +272,12 @@ extraction. These changes should be re-applied to each index's `@custom` pipeline in order to ensure a consistent data processing experience. 
In 8.5+, the <> is required *in addition* to the configurations mentioned in the {enterprise-search-ref}/crawler-managing.html#crawler-managing-binary-content[Elastic web crawler Guide]. -* `ent-search-generic-ingestion` - Since 8.5, Native Connectors, Connector Clients, and new (>8.4) Elastic web crawler indices will all make use of this pipeline by default. +* `ent-search-generic-ingestion` - Since 8.5, Native Connectors, Connector Clients, and new (>8.4) Elastic web crawler indices all made use of this pipeline by default. + This pipeline evolved into the `search-default-ingestion` pipeline. + +* `search-default-ingestion` - Since 9.0, Connectors have made use of this pipeline by default. You can <> above. - As this pipeline is "managed", any modifications that were made to `app_search_crawler` and/or `ent_search_crawler` should NOT be made to `ent-search-generic-ingestion`. + As this pipeline is "managed", any modifications that were made to `app_search_crawler` and/or `ent_search_crawler` should NOT be made to `search-default-ingestion`. Instead, if such customizations are desired, you should utilize <>, placing all modifications in the `@custom` pipeline(s). ============= diff --git a/docs/reference/ingest/search-nlp-tutorial.asciidoc b/docs/reference/ingest/search-nlp-tutorial.asciidoc index afdceeeb8bac2..b23a15c96b1a2 100644 --- a/docs/reference/ingest/search-nlp-tutorial.asciidoc +++ b/docs/reference/ingest/search-nlp-tutorial.asciidoc @@ -164,8 +164,8 @@ Now it's time to create an inference pipeline. 1. From the overview page for your `search-photo-comments` index in "Search", click the *Pipelines* tab. By default, Elasticsearch does not create any index-specific ingest pipelines. -2. Because we want to customize these pipelines, we need to *Copy and customize* the `ent-search-generic-ingestion` ingest pipeline. -Find this option above the settings for the `ent-search-generic-ingestion` ingest pipeline. +2. Because we want to customize these pipelines, we need to *Copy and customize* the `search-default-ingestion` ingest pipeline. +Find this option above the settings for the `search-default-ingestion` ingest pipeline. This will create two new index-specific ingest pipelines. Next, we'll add an inference pipeline. diff --git a/docs/reference/ml/trained-models/apis/start-trained-model-deployment.asciidoc b/docs/reference/ml/trained-models/apis/start-trained-model-deployment.asciidoc index 6f7e2a4d9f988..bf9c4d14db290 100644 --- a/docs/reference/ml/trained-models/apis/start-trained-model-deployment.asciidoc +++ b/docs/reference/ml/trained-models/apis/start-trained-model-deployment.asciidoc @@ -138,8 +138,8 @@ normal priority deployments. Controls how many inference requests are allowed in the queue at a time. Every machine learning node in the cluster where the model can be allocated has a queue of this size; when the number of requests exceeds the total value, -new requests are rejected with a 429 error. Defaults to 1024. Max allowed value -is 1000000. +new requests are rejected with a 429 error. Defaults to 10000. Max allowed value +is 100000. 
`threads_per_allocation`:: (Optional, integer) @@ -173,7 +173,7 @@ The API returns the following results: "model_bytes": 265632637, "threads_per_allocation" : 1, "number_of_allocations" : 1, - "queue_capacity" : 1024, + "queue_capacity" : 10000, "priority": "normal" }, "routing_table": { @@ -229,4 +229,4 @@ POST _ml/trained_models/my_model/deployment/_start?deployment_id=my_model_for_se } } -------------------------------------------------- -// TEST[skip:TBD] \ No newline at end of file +// TEST[skip:TBD] diff --git a/docs/reference/setup/install/docker.asciidoc b/docs/reference/setup/install/docker.asciidoc index 86a0e567f6eec..f3576db0c786c 100644 --- a/docs/reference/setup/install/docker.asciidoc +++ b/docs/reference/setup/install/docker.asciidoc @@ -25,6 +25,21 @@ TIP: This setup doesn't run multiple {es} nodes or {kib} by default. To create a multi-node cluster with {kib}, use Docker Compose instead. See <>. +[[docker-wolfi-hardened-image]] +===== Hardened Docker images + +You can also use the hardened https://wolfi.dev/[Wolfi] image for additional security. +Using Wolfi images requires Docker version 20.10.10 or higher. + +To use the Wolfi image, append `-wolfi` to the image tag in the Docker command. + +For example: + +[source,sh,subs="attributes"] +---- +docker pull {docker-wolfi-image} +---- + ===== Start a single-node cluster . Install Docker. Visit https://docs.docker.com/get-docker/[Get Docker] to @@ -55,12 +70,6 @@ docker pull {docker-image} // REVIEWED[DEC.10.24] -- -Alternatevely, you can use the Wolfi based image. Using Wolfi based images requires Docker version 20.10.10 or superior. -[source,sh,subs="attributes"] ----- -docker pull {docker-wolfi-image} ----- - . Optional: Install https://docs.sigstore.dev/cosign/system_config/installation/[Cosign] for your environment. Then use Cosign to verify the {es} image's signature. 
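For instance, the standard single-node quickstart command works the same way with the hardened image. This is a minimal sketch (an editorial illustration, not part of the upstream change) that assumes a `docker network create elastic` network as in the regular instructions and that the `{docker-wolfi-image}` attribute resolves to the `-wolfi` tag of the same {es} version:

[source,sh,subs="attributes"]
----
docker run --name es01 --net elastic -p 9200:9200 -it -m 1GB {docker-wolfi-image}
----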
diff --git a/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/bucket/SearchCancellationIT.java b/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/bucket/SearchCancellationIT.java
index 5249077bdfdbb..7adf6a09e9a19 100644
--- a/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/bucket/SearchCancellationIT.java
+++ b/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/bucket/SearchCancellationIT.java
@@ -96,6 +96,8 @@ public void testCancellationDuringTimeSeriesAggregation() throws Exception {
        }
        logger.info("Executing search");
+       // we have to explicitly set error_trace=true for the later exception check for `TimeSeriesIndexSearcher`
+       client().threadPool().getThreadContext().putHeader("error_trace", "true");
        TimeSeriesAggregationBuilder timeSeriesAggregationBuilder = new TimeSeriesAggregationBuilder("test_agg");
        ActionFuture<SearchResponse> searchResponse = prepareSearch("test").setQuery(matchAllQuery())
            .addAggregation(
diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/NGramTokenizerFactoryTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/NGramTokenizerFactoryTests.java
index 8c365a1362f85..35c01b5b9296f 100644
--- a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/NGramTokenizerFactoryTests.java
+++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/NGramTokenizerFactoryTests.java
@@ -161,7 +161,7 @@ public void testBackwardsCompatibilityEdgeNgramTokenFilter() throws Exception {
        for (int i = 0; i < iters; i++) {
            final Index index = new Index("test", "_na_");
            final String name = "ngr";
-           IndexVersion v = IndexVersionUtils.randomVersion(random());
+           IndexVersion v = IndexVersionUtils.randomVersion();
            Builder builder = newAnalysisSettingsBuilder().put("min_gram", 2).put("max_gram", 3);
            boolean reverse = random().nextBoolean();
            if (reverse) {
diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/PersianAnalyzerProviderTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/PersianAnalyzerProviderTests.java
index 7b962538c2a10..153c3e9549285 100644
--- a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/PersianAnalyzerProviderTests.java
+++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/PersianAnalyzerProviderTests.java
@@ -56,7 +56,7 @@ public void testPersianAnalyzerPostLucene10() throws IOException {
    public void testPersianAnalyzerPreLucene10() throws IOException {
        IndexVersion preLucene10Version = IndexVersionUtils.randomVersionBetween(
            random(),
-           IndexVersionUtils.getFirstVersion(),
+           IndexVersionUtils.getLowestReadCompatibleVersion(),
            IndexVersionUtils.getPreviousVersion(IndexVersions.UPGRADE_TO_LUCENE_10_0_0)
        );
        Settings settings = ESTestCase.indexSettings(1, 1)
diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/RomanianAnalyzerTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/RomanianAnalyzerTests.java
index 1af44bc71f35d..29e27e62e3164 100644
--- a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/RomanianAnalyzerTests.java
+++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/RomanianAnalyzerTests.java
@@ -57,7 +57,7 @@ public void testRomanianAnalyzerPostLucene10() throws IOException {
    public void testRomanianAnalyzerPreLucene10() throws
IOException { IndexVersion preLucene10Version = IndexVersionUtils.randomVersionBetween( random(), - IndexVersionUtils.getFirstVersion(), + IndexVersionUtils.getLowestReadCompatibleVersion(), IndexVersionUtils.getPreviousVersion(IndexVersions.UPGRADE_TO_LUCENE_10_0_0) ); Settings settings = ESTestCase.indexSettings(1, 1) diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/StemmerTokenFilterFactoryTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/StemmerTokenFilterFactoryTests.java index bb06c221873b5..4e774d92e3d62 100644 --- a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/StemmerTokenFilterFactoryTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/StemmerTokenFilterFactoryTests.java @@ -39,7 +39,7 @@ public class StemmerTokenFilterFactoryTests extends ESTokenStreamTestCase { public void testEnglishFilterFactory() throws IOException { int iters = scaledRandomIntBetween(20, 100); for (int i = 0; i < iters; i++) { - IndexVersion v = IndexVersionUtils.randomVersion(random()); + IndexVersion v = IndexVersionUtils.randomVersion(); Settings settings = Settings.builder() .put("index.analysis.filter.my_english.type", "stemmer") .put("index.analysis.filter.my_english.language", "english") @@ -66,7 +66,7 @@ public void testPorter2FilterFactory() throws IOException { int iters = scaledRandomIntBetween(20, 100); for (int i = 0; i < iters; i++) { - IndexVersion v = IndexVersionUtils.randomVersion(random()); + IndexVersion v = IndexVersionUtils.randomVersion(); Settings settings = Settings.builder() .put("index.analysis.filter.my_porter2.type", "stemmer") .put("index.analysis.filter.my_porter2.language", "porter2") @@ -90,7 +90,7 @@ public void testPorter2FilterFactory() throws IOException { } public void testMultipleLanguagesThrowsException() throws IOException { - IndexVersion v = IndexVersionUtils.randomVersion(random()); + IndexVersion v = IndexVersionUtils.randomVersion(); Settings settings = Settings.builder() .put("index.analysis.filter.my_english.type", "stemmer") .putList("index.analysis.filter.my_english.language", "english", "light_english") @@ -142,7 +142,7 @@ private static Analyzer createGermanStemmer(String variant, IndexVersion v) thro } public void testKpDeprecation() throws IOException { - IndexVersion v = IndexVersionUtils.randomVersion(random()); + IndexVersion v = IndexVersionUtils.randomVersion(); Settings settings = Settings.builder() .put("index.analysis.filter.my_kp.type", "stemmer") .put("index.analysis.filter.my_kp.language", "kp") @@ -155,7 +155,7 @@ public void testKpDeprecation() throws IOException { } public void testLovinsDeprecation() throws IOException { - IndexVersion v = IndexVersionUtils.randomVersion(random()); + IndexVersion v = IndexVersionUtils.randomVersion(); Settings settings = Settings.builder() .put("index.analysis.filter.my_lovins.type", "stemmer") .put("index.analysis.filter.my_lovins.language", "lovins") diff --git a/muted-tests.yml b/muted-tests.yml index 42845fda82180..a06334146ed7b 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -87,9 +87,6 @@ tests: issue: https://github.com/elastic/elasticsearch/issues/115816 - class: org.elasticsearch.xpack.application.connector.ConnectorIndexServiceTests issue: https://github.com/elastic/elasticsearch/issues/116087 -- class: org.elasticsearch.backwards.MixedClusterClientYamlTestSuiteIT - method: test {p0=cat.shards/10_basic/Help} - issue: 
https://github.com/elastic/elasticsearch/issues/116110 - class: org.elasticsearch.xpack.ml.integration.DatafeedJobsRestIT method: testLookbackWithIndicesOptions issue: https://github.com/elastic/elasticsearch/issues/116127 @@ -156,24 +153,6 @@ tests: issue: https://github.com/elastic/elasticsearch/issues/117473 - class: org.elasticsearch.repositories.s3.RepositoryS3EcsClientYamlTestSuiteIT issue: https://github.com/elastic/elasticsearch/issues/117525 -- class: "org.elasticsearch.xpack.esql.qa.multi_node.EsqlSpecIT" - method: "test {scoring.*}" - issue: https://github.com/elastic/elasticsearch/issues/117641 -- class: "org.elasticsearch.xpack.esql.qa.single_node.EsqlSpecIT" - method: "test {scoring.*}" - issue: https://github.com/elastic/elasticsearch/issues/117641 -- class: "org.elasticsearch.xpack.esql.qa.mixed.MixedClusterEsqlSpecIT" - method: "test {scoring.*}" - issue: https://github.com/elastic/elasticsearch/issues/117641 -- class: "org.elasticsearch.xpack.esql.qa.mixed.MultiClusterEsqlSpecIT" - method: "test {scoring.*}" - issue: https://github.com/elastic/elasticsearch/issues/118460 -- class: "org.elasticsearch.xpack.esql.ccq.MultiClusterSpecIT" - method: "test {scoring.*}" - issue: https://github.com/elastic/elasticsearch/issues/118460 -- class: org.elasticsearch.xpack.esql.ccq.MultiClusterSpecIT - method: test {scoring.QstrWithFieldAndScoringSortedEval} - issue: https://github.com/elastic/elasticsearch/issues/117751 - class: org.elasticsearch.search.ccs.CrossClusterIT method: testCancel issue: https://github.com/elastic/elasticsearch/issues/108061 @@ -259,8 +238,6 @@ tests: - class: org.elasticsearch.datastreams.DataStreamsClientYamlTestSuiteIT method: test {p0=data_stream/120_data_streams_stats/Multiple data stream} issue: https://github.com/elastic/elasticsearch/issues/118217 -- class: org.elasticsearch.validation.DotPrefixClientYamlTestSuiteIT - issue: https://github.com/elastic/elasticsearch/issues/118224 - class: org.elasticsearch.packaging.test.ArchiveTests method: test60StartAndStop issue: https://github.com/elastic/elasticsearch/issues/118216 @@ -305,6 +282,17 @@ tests: - class: org.elasticsearch.index.engine.RecoverySourcePruneMergePolicyTests method: testPruneSome issue: https://github.com/elastic/elasticsearch/issues/118728 +- class: org.elasticsearch.smoketest.DocsClientYamlTestSuiteIT + method: test {yaml=reference/indices/shard-stores/line_150} + issue: https://github.com/elastic/elasticsearch/issues/118896 +- class: org.elasticsearch.cluster.service.MasterServiceTests + method: testThreadContext + issue: https://github.com/elastic/elasticsearch/issues/118914 +- class: org.elasticsearch.smoketest.SmokeTestMultiNodeClientYamlTestSuiteIT + method: test {yaml=indices.create/20_synthetic_source/create index with use_synthetic_source} + issue: https://github.com/elastic/elasticsearch/issues/118955 +- class: org.elasticsearch.repositories.blobstore.testkit.analyze.SecureHdfsRepositoryAnalysisRestIT + issue: https://github.com/elastic/elasticsearch/issues/118970 # Examples: # diff --git a/qa/mixed-cluster/build.gradle b/qa/mixed-cluster/build.gradle index d8f906b23d523..28bcac9f0242d 100644 --- a/qa/mixed-cluster/build.gradle +++ b/qa/mixed-cluster/build.gradle @@ -67,6 +67,9 @@ excludeList.add('indices.resolve_index/20_resolve_system_index/*') // Excluded because the error has changed excludeList.add('aggregations/percentiles_hdr_metric/Negative values test') +// sync_id is removed in 9.0 +excludeList.add("cat.shards/10_basic/Help") + def clusterPath = getPath() 
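// Illustrative sketch, not part of this patch: entries added to excludeList
// above are typically wired into the YAML test runner as a blacklist property,
// so "cat.shards/10_basic/Help" is skipped for every wire-compatible BWC
// combination instead of being muted branch-by-branch in muted-tests.yml.
// Assuming the usual mixed-cluster wiring, that looks roughly like:
//
//   tasks.withType(StandaloneRestIntegTestTask).configureEach {
//     systemProperty 'tests.rest.blacklist', excludeList.join(',')
//   }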
buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> diff --git a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/SearchErrorTraceIT.java b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/SearchErrorTraceIT.java new file mode 100644 index 0000000000000..6f9ab8ccdfdec --- /dev/null +++ b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/SearchErrorTraceIT.java @@ -0,0 +1,175 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.http; + +import org.apache.http.entity.ContentType; +import org.apache.http.nio.entity.NByteArrayEntity; +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.action.search.MultiSearchRequest; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.client.Request; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.transport.TransportMessageListener; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xcontent.XContentType; +import org.junit.Before; + +import java.io.IOException; +import java.nio.charset.Charset; +import java.util.Optional; +import java.util.concurrent.atomic.AtomicBoolean; + +import static org.elasticsearch.index.query.QueryBuilders.simpleQueryStringQuery; + +public class SearchErrorTraceIT extends HttpSmokeTestCase { + private AtomicBoolean hasStackTrace; + + @Before + private void setupMessageListener() { + internalCluster().getDataNodeInstances(TransportService.class).forEach(ts -> { + ts.addMessageListener(new TransportMessageListener() { + @Override + public void onResponseSent(long requestId, String action, Exception error) { + TransportMessageListener.super.onResponseSent(requestId, action, error); + if (action.startsWith("indices:data/read/search")) { + Optional throwable = ExceptionsHelper.unwrapCausesAndSuppressed( + error, + t -> t.getStackTrace().length > 0 + ); + hasStackTrace.set(throwable.isPresent()); + } + } + }); + }); + } + + private void setupIndexWithDocs() { + createIndex("test1", "test2"); + indexRandom( + true, + prepareIndex("test1").setId("1").setSource("field", "foo"), + prepareIndex("test2").setId("10").setSource("field", 5) + ); + refresh(); + } + + public void testSearchFailingQueryErrorTraceDefault() throws IOException { + hasStackTrace = new AtomicBoolean(); + setupIndexWithDocs(); + + Request searchRequest = new Request("POST", "/_search"); + searchRequest.setJsonEntity(""" + { + "query": { + "simple_query_string" : { + "query": "foo", + "fields": ["field"] + } + } + } + """); + getRestClient().performRequest(searchRequest); + assertFalse(hasStackTrace.get()); + } + + public void testSearchFailingQueryErrorTraceTrue() throws IOException { + hasStackTrace = new AtomicBoolean(); + setupIndexWithDocs(); + + Request searchRequest = new Request("POST", "/_search"); + searchRequest.setJsonEntity(""" + { + "query": { + "simple_query_string" : { + "query": "foo", + "fields": ["field"] + } + } + } + """); + searchRequest.addParameter("error_trace", "true"); + getRestClient().performRequest(searchRequest); + 
assertTrue(hasStackTrace.get()); + } + + public void testSearchFailingQueryErrorTraceFalse() throws IOException { + hasStackTrace = new AtomicBoolean(); + setupIndexWithDocs(); + + Request searchRequest = new Request("POST", "/_search"); + searchRequest.setJsonEntity(""" + { + "query": { + "simple_query_string" : { + "query": "foo", + "fields": ["field"] + } + } + } + """); + searchRequest.addParameter("error_trace", "false"); + getRestClient().performRequest(searchRequest); + assertFalse(hasStackTrace.get()); + } + + public void testMultiSearchFailingQueryErrorTraceDefault() throws IOException { + hasStackTrace = new AtomicBoolean(); + setupIndexWithDocs(); + + XContentType contentType = XContentType.JSON; + MultiSearchRequest multiSearchRequest = new MultiSearchRequest().add( + new SearchRequest("test*").source(new SearchSourceBuilder().query(simpleQueryStringQuery("foo").field("field"))) + ); + Request searchRequest = new Request("POST", "/_msearch"); + byte[] requestBody = MultiSearchRequest.writeMultiLineFormat(multiSearchRequest, contentType.xContent()); + searchRequest.setEntity( + new NByteArrayEntity(requestBody, ContentType.create(contentType.mediaTypeWithoutParameters(), (Charset) null)) + ); + getRestClient().performRequest(searchRequest); + assertFalse(hasStackTrace.get()); + } + + public void testMultiSearchFailingQueryErrorTraceTrue() throws IOException { + hasStackTrace = new AtomicBoolean(); + setupIndexWithDocs(); + + XContentType contentType = XContentType.JSON; + MultiSearchRequest multiSearchRequest = new MultiSearchRequest().add( + new SearchRequest("test*").source(new SearchSourceBuilder().query(simpleQueryStringQuery("foo").field("field"))) + ); + Request searchRequest = new Request("POST", "/_msearch"); + byte[] requestBody = MultiSearchRequest.writeMultiLineFormat(multiSearchRequest, contentType.xContent()); + searchRequest.setEntity( + new NByteArrayEntity(requestBody, ContentType.create(contentType.mediaTypeWithoutParameters(), (Charset) null)) + ); + searchRequest.addParameter("error_trace", "true"); + getRestClient().performRequest(searchRequest); + assertTrue(hasStackTrace.get()); + } + + public void testMultiSearchFailingQueryErrorTraceFalse() throws IOException { + hasStackTrace = new AtomicBoolean(); + setupIndexWithDocs(); + + XContentType contentType = XContentType.JSON; + MultiSearchRequest multiSearchRequest = new MultiSearchRequest().add( + new SearchRequest("test*").source(new SearchSourceBuilder().query(simpleQueryStringQuery("foo").field("field"))) + ); + Request searchRequest = new Request("POST", "/_msearch"); + byte[] requestBody = MultiSearchRequest.writeMultiLineFormat(multiSearchRequest, contentType.xContent()); + searchRequest.setEntity( + new NByteArrayEntity(requestBody, ContentType.create(contentType.mediaTypeWithoutParameters(), (Charset) null)) + ); + searchRequest.addParameter("error_trace", "false"); + getRestClient().performRequest(searchRequest); + + assertFalse(hasStackTrace.get()); + } +} diff --git a/renovate.json b/renovate.json index c1637ae651c1c..71c6301f8e0c2 100644 --- a/renovate.json +++ b/renovate.json @@ -7,8 +7,8 @@ "schedule": [ "after 1pm on tuesday" ], - "labels": [">non-issue", ":Delivery/Packaging", "Team:Delivery"], - "baseBranches": ["main", "8.x"], + "labels": [">non-issue", ":Delivery/Packaging", "Team:Delivery", "auto-merge-without-approval"], + "baseBranches": ["main", "8.x", "8.17", "8.16"], "packageRules": [ { "groupName": "wolfi (versioned)", diff --git 
a/rest-api-spec/src/main/resources/rest-api-spec/api/eql.search.json b/rest-api-spec/src/main/resources/rest-api-spec/api/eql.search.json index c854c44d9d761..0f9af508f4c16 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/eql.search.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/eql.search.json @@ -41,6 +41,16 @@ "type": "time", "description": "Update the time interval in which the results (partial or final) for this search will be available", "default": "5d" + }, + "allow_partial_search_results": { + "type":"boolean", + "description":"Control whether the query should keep running in case of shard failures, and return partial results", + "default":false + }, + "allow_partial_sequence_results": { + "type":"boolean", + "description":"Control whether a sequence query should return partial results or no results at all in case of shard failures. This option has effect only if [allow_partial_search_results] is true.", + "default":false } }, "body":{ diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ilm.delete_lifecycle.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ilm.delete_lifecycle.json index 2ff1031ad5c52..cd6397fb61586 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/ilm.delete_lifecycle.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ilm.delete_lifecycle.json @@ -25,6 +25,15 @@ } ] }, - "params":{} + "params":{ + "master_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to master node" + }, + "timeout":{ + "type":"time", + "description":"Explicit operation timeout" + } + } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ilm.explain_lifecycle.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ilm.explain_lifecycle.json index c793ed09281ae..94c37adb802f6 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/ilm.explain_lifecycle.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ilm.explain_lifecycle.json @@ -33,6 +33,10 @@ "only_errors": { "type": "boolean", "description": "filters the indices included in the response to ones in an ILM error state, implies only_managed" + }, + "master_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to master node" } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ilm.get_lifecycle.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ilm.get_lifecycle.json index 17bf813093dd6..5abdfac7f5b30 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/ilm.get_lifecycle.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ilm.get_lifecycle.json @@ -31,6 +31,15 @@ } ] }, - "params":{} + "params":{ + "master_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to master node" + }, + "timeout":{ + "type":"time", + "description":"Explicit operation timeout" + } + } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ilm.put_lifecycle.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ilm.put_lifecycle.json index 5a12a778241b3..b7fdbe04a0ffb 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/ilm.put_lifecycle.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ilm.put_lifecycle.json @@ -26,7 +26,16 @@ } ] }, - "params":{}, + "params":{ + "master_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to master node" + }, + "timeout":{ + "type":"time", + "description":"Explicit operation timeout" + } + }, "body":{ 
"description":"The lifecycle policy definition to register" } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ilm.start.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ilm.start.json index 88b020071ab82..7141673ff9a9d 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/ilm.start.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ilm.start.json @@ -19,6 +19,15 @@ } ] }, - "params":{} + "params":{ + "master_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to master node" + }, + "timeout":{ + "type":"time", + "description":"Explicit operation timeout" + } + } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ilm.stop.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ilm.stop.json index 8401f93badfc4..962fa77263ee4 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/ilm.stop.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ilm.stop.json @@ -19,6 +19,15 @@ } ] }, - "params":{} + "params":{ + "master_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to master node" + }, + "timeout":{ + "type":"time", + "description":"Explicit operation timeout" + } + } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.delete_geoip_database.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.delete_geoip_database.json index fe50da720a4da..f76d328836d90 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.delete_geoip_database.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.delete_geoip_database.json @@ -26,6 +26,14 @@ ] }, "params":{ + "master_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to master node" + }, + "timeout":{ + "type":"time", + "description":"Explicit operation timeout" + } } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.delete_ip_location_database.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.delete_ip_location_database.json index e97d1da276906..341ff5081e270 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.delete_ip_location_database.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.delete_ip_location_database.json @@ -26,6 +26,14 @@ ] }, "params":{ + "master_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to master node" + }, + "timeout":{ + "type":"time", + "description":"Explicit operation timeout" + } } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.put_geoip_database.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.put_geoip_database.json index 6d088e3f164f4..9c2677d1f7b2f 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.put_geoip_database.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.put_geoip_database.json @@ -27,6 +27,14 @@ ] }, "params":{ + "master_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to master node" + }, + "timeout":{ + "type":"time", + "description":"Explicit operation timeout" + } }, "body":{ "description":"The database configuration definition", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.put_ip_location_database.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.put_ip_location_database.json index 18487969b1a90..782048b98160a 100644 --- 
a/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.put_ip_location_database.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.put_ip_location_database.json @@ -27,6 +27,14 @@ ] }, "params":{ + "master_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to master node" + }, + "timeout":{ + "type":"time", + "description":"Explicit operation timeout" + } }, "body":{ "description":"The database configuration definition", diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml index af3d88fb35734..edb684168278b 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml @@ -2012,3 +2012,43 @@ synthetic_source with copy_to pointing inside dynamic object: hits.hits.2.fields: c.copy.keyword: [ "hello", "zap" ] +--- +create index with use_synthetic_source: + - requires: + cluster_features: ["mapper.synthetic_recovery_source"] + reason: requires synthetic recovery source + + - do: + indices.create: + index: test + body: + settings: + index: + recovery: + use_synthetic_source: true + mapping: + source: + mode: synthetic + + - do: + indices.get_settings: {} + - match: { test.settings.index.mapping.source.mode: synthetic} + - is_true: test.settings.index.recovery.use_synthetic_source + + - do: + bulk: + index: test + refresh: true + body: + - '{ "create": { } }' + - '{ "field": "aaaa" }' + - '{ "create": { } }' + - '{ "field": "bbbb" }' + + - do: + indices.disk_usage: + index: test + run_expensive_tasks: true + flush: false + - gt: { test.store_size_in_bytes: 0 } + - is_false: test.fields._recovery_source diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java index 4f6d24b419595..ca9fdb27ac389 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java @@ -236,7 +236,7 @@ private static IndexMetadata indexMetadata(final Client client, final String ind public void testCreateShrinkIndex() { internalCluster().ensureAtLeastNumDataNodes(2); - IndexVersion version = IndexVersionUtils.randomVersion(random()); + IndexVersion version = IndexVersionUtils.randomWriteVersion(); prepareCreate("source").setSettings( Settings.builder().put(indexSettings()).put("number_of_shards", randomIntBetween(2, 7)).put("index.version.created", version) ).get(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterStateDiffIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterStateDiffIT.java index 58b9af7724aaa..e8e4eb7562462 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterStateDiffIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterStateDiffIT.java @@ -71,6 +71,7 @@ import static org.elasticsearch.test.XContentTestUtils.convertToMap; import static org.elasticsearch.test.XContentTestUtils.differenceBetweenMapsIgnoringArrayOrder; import static org.elasticsearch.test.index.IndexVersionUtils.randomVersion; +import static 
org.elasticsearch.test.index.IndexVersionUtils.randomWriteVersion; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; @@ -218,7 +219,7 @@ private ClusterState.Builder randomCoordinationMetadata(ClusterState clusterStat private DiscoveryNode randomNode(String nodeId) { Version nodeVersion = VersionUtils.randomVersion(random()); - IndexVersion indexVersion = randomVersion(random()); + IndexVersion indexVersion = randomVersion(); return DiscoveryNodeUtils.builder(nodeId) .roles(emptySet()) .version(nodeVersion, IndexVersion.fromId(indexVersion.id() - 1_000_000), indexVersion) @@ -561,7 +562,7 @@ public IndexMetadata randomCreate(String name) { IndexMetadata.Builder builder = IndexMetadata.builder(name); Settings.Builder settingsBuilder = Settings.builder(); setRandomIndexSettings(random(), settingsBuilder); - settingsBuilder.put(randomSettings(Settings.EMPTY)).put(IndexMetadata.SETTING_VERSION_CREATED, randomVersion(random())); + settingsBuilder.put(randomSettings(Settings.EMPTY)).put(IndexMetadata.SETTING_VERSION_CREATED, randomWriteVersion()); builder.settings(settingsBuilder); builder.numberOfShards(randomIntBetween(1, 10)).numberOfReplicas(randomInt(10)); builder.eventIngestedRange(IndexLongFieldRange.UNKNOWN, TransportVersion.current()); @@ -736,7 +737,7 @@ public ClusterState.Custom randomCreate(String name) { ImmutableOpenMap.of(), null, SnapshotInfoTestUtils.randomUserMetadata(), - randomVersion(random()) + randomVersion() ) ); case 1 -> new RestoreInProgress.Builder().add( diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzerIntegrationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzerIntegrationIT.java index 48aef0d348045..891b0319f880d 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzerIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzerIntegrationIT.java @@ -47,7 +47,7 @@ public void testThatPreBuiltAnalyzersAreNotClosedOnIndexClose() throws Exception PreBuiltAnalyzers preBuiltAnalyzer = PreBuiltAnalyzers.values()[randomInt]; String name = preBuiltAnalyzer.name().toLowerCase(Locale.ROOT); - IndexVersion randomVersion = IndexVersionUtils.randomVersion(random()); + IndexVersion randomVersion = IndexVersionUtils.randomWriteVersion(); if (loadedAnalyzers.containsKey(preBuiltAnalyzer) == false) { loadedAnalyzers.put(preBuiltAnalyzer, new ArrayList<>()); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java index 7d4269550bb88..fa1348c82d71a 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java @@ -2050,7 +2050,7 @@ public void testPostRecoveryMergeDisabledOnOlderIndices() throws Exception { IndexMetadata.SETTING_VERSION_CREATED, IndexVersionUtils.randomVersionBetween( random(), - IndexVersionUtils.getFirstVersion(), + IndexVersionUtils.getLowestWriteCompatibleVersion(), IndexVersionUtils.getPreviousVersion(IndexVersions.MERGE_ON_RECOVERY_VERSION) ) ) diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index f5e581a81a37c..bda66d6a2c8cd 100644 --- 
a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -138,6 +138,9 @@ static TransportVersion def(int id) { public static final TransportVersion KNN_QUERY_RESCORE_OVERSAMPLE = def(8_806_00_0); public static final TransportVersion SEMANTIC_QUERY_LENIENT = def(8_807_00_0); public static final TransportVersion ESQL_QUERY_BUILDER_IN_SEARCH_FUNCTIONS = def(8_808_00_0); + public static final TransportVersion EQL_ALLOW_PARTIAL_SEARCH_RESULTS = def(8_809_00_0); + public static final TransportVersion NODE_VERSION_INFORMATION_WITH_MIN_READ_ONLY_INDEX_VERSION = def(8_810_00_0); + public static final TransportVersion ERROR_TRACE_IN_TRANSPORT_HEADER = def(8_811_00_0); /* * STOP! READ THIS FIRST! No, really, diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index 47c43eadcfb03..8873c9b0e281e 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -191,6 +191,7 @@ public class Version implements VersionId, ToXContentFragment { public static final Version V_8_16_0 = new Version(8_16_00_99); public static final Version V_8_16_1 = new Version(8_16_01_99); public static final Version V_8_16_2 = new Version(8_16_02_99); + public static final Version V_8_16_3 = new Version(8_16_03_99); public static final Version V_8_17_0 = new Version(8_17_00_99); public static final Version V_8_17_1 = new Version(8_17_01_99); public static final Version V_8_18_0 = new Version(8_18_00_99); diff --git a/server/src/main/java/org/elasticsearch/action/ResolvedIndices.java b/server/src/main/java/org/elasticsearch/action/ResolvedIndices.java index f149603f12d8b..16f37c9573a8e 100644 --- a/server/src/main/java/org/elasticsearch/action/ResolvedIndices.java +++ b/server/src/main/java/org/elasticsearch/action/ResolvedIndices.java @@ -150,10 +150,26 @@ public static ResolvedIndices resolveWithIndicesRequest( RemoteClusterService remoteClusterService, long startTimeInMillis ) { - final Map remoteClusterIndices = remoteClusterService.groupIndices( + return resolveWithIndexNamesAndOptions( + request.indices(), request.indicesOptions(), - request.indices() + clusterState, + indexNameExpressionResolver, + remoteClusterService, + startTimeInMillis ); + } + + public static ResolvedIndices resolveWithIndexNamesAndOptions( + String[] indexNames, + IndicesOptions indicesOptions, + ClusterState clusterState, + IndexNameExpressionResolver indexNameExpressionResolver, + RemoteClusterService remoteClusterService, + long startTimeInMillis + ) { + final Map remoteClusterIndices = remoteClusterService.groupIndices(indicesOptions, indexNames); + final OriginalIndices localIndices = remoteClusterIndices.remove(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); Index[] concreteLocalIndices = localIndices == null diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java b/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java index cfc2e1bcdaf2b..2041754bc2bcc 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java @@ -456,7 +456,8 @@ public static void registerRequestHandler(TransportService transportService, Sea (request, channel, task) -> searchService.executeQueryPhase( request, (SearchShardTask) task, - new ChannelActionListener<>(channel) + new 
ChannelActionListener<>(channel), + channel.getVersion() ) ); TransportActionProxy.registerProxyAction(transportService, QUERY_ID_ACTION_NAME, true, QuerySearchResult::new); @@ -468,7 +469,8 @@ public static void registerRequestHandler(TransportService transportService, Sea (request, channel, task) -> searchService.executeQueryPhase( request, (SearchShardTask) task, - new ChannelActionListener<>(channel) + new ChannelActionListener<>(channel), + channel.getVersion() ) ); TransportActionProxy.registerProxyAction(transportService, QUERY_SCROLL_ACTION_NAME, true, ScrollQuerySearchResult::new); diff --git a/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java b/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java index 7bf367f99b929..7c757e7657853 100644 --- a/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java +++ b/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java @@ -37,6 +37,7 @@ import java.util.SortedSet; import java.util.TreeSet; +import static org.elasticsearch.TransportVersions.NODE_VERSION_INFORMATION_WITH_MIN_READ_ONLY_INDEX_VERSION; import static org.elasticsearch.node.NodeRoleSettings.NODE_ROLES_SETTING; /** @@ -325,7 +326,17 @@ public DiscoveryNode(StreamInput in) throws IOException { } } this.roles = Collections.unmodifiableSortedSet(roles); - versionInfo = new VersionInformation(Version.readVersion(in), IndexVersion.readVersion(in), IndexVersion.readVersion(in)); + Version version = Version.readVersion(in); + IndexVersion minIndexVersion = IndexVersion.readVersion(in); + IndexVersion minReadOnlyIndexVersion; + if (in.getTransportVersion().onOrAfter(NODE_VERSION_INFORMATION_WITH_MIN_READ_ONLY_INDEX_VERSION)) { + minReadOnlyIndexVersion = IndexVersion.readVersion(in); + } else { + minReadOnlyIndexVersion = minIndexVersion; + + } + IndexVersion maxIndexVersion = IndexVersion.readVersion(in); + versionInfo = new VersionInformation(version, minIndexVersion, minReadOnlyIndexVersion, maxIndexVersion); if (in.getTransportVersion().onOrAfter(EXTERNAL_ID_VERSION)) { this.externalId = readStringLiteral.read(in); } else { @@ -360,6 +371,9 @@ public void writeTo(StreamOutput out) throws IOException { }); Version.writeVersion(versionInfo.nodeVersion(), out); IndexVersion.writeVersion(versionInfo.minIndexVersion(), out); + if (out.getTransportVersion().onOrAfter(NODE_VERSION_INFORMATION_WITH_MIN_READ_ONLY_INDEX_VERSION)) { + IndexVersion.writeVersion(versionInfo.minReadOnlyIndexVersion(), out); + } IndexVersion.writeVersion(versionInfo.maxIndexVersion(), out); if (out.getTransportVersion().onOrAfter(EXTERNAL_ID_VERSION)) { out.writeString(externalId); @@ -478,6 +492,10 @@ public IndexVersion getMinIndexVersion() { return versionInfo.minIndexVersion(); } + public IndexVersion getMinReadOnlyIndexVersion() { + return versionInfo.minReadOnlyIndexVersion(); + } + public IndexVersion getMaxIndexVersion() { return versionInfo.maxIndexVersion(); } @@ -577,6 +595,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.endArray(); builder.field("version", versionInfo.buildVersion().toString()); builder.field("min_index_version", versionInfo.minIndexVersion()); + builder.field("min_read_only_index_version", versionInfo.minReadOnlyIndexVersion()); builder.field("max_index_version", versionInfo.maxIndexVersion()); builder.endObject(); return builder; diff --git a/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java 
b/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java index 5e6dec7b68062..f733ab223fdd1 100644 --- a/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java +++ b/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java @@ -69,6 +69,7 @@ public class DiscoveryNodes implements Iterable, SimpleDiffable> tiersToNodeIds; @@ -84,6 +85,7 @@ private DiscoveryNodes( Version minNodeVersion, IndexVersion maxDataNodeCompatibleIndexVersion, IndexVersion minSupportedIndexVersion, + IndexVersion minReadOnlySupportedIndexVersion, Map> tiersToNodeIds ) { this.nodeLeftGeneration = nodeLeftGeneration; @@ -100,6 +102,8 @@ private DiscoveryNodes( this.maxNodeVersion = maxNodeVersion; this.maxDataNodeCompatibleIndexVersion = maxDataNodeCompatibleIndexVersion; this.minSupportedIndexVersion = minSupportedIndexVersion; + this.minReadOnlySupportedIndexVersion = minReadOnlySupportedIndexVersion; + assert minReadOnlySupportedIndexVersion.onOrBefore(minSupportedIndexVersion); assert (localNodeId == null) == (localNode == null); this.tiersToNodeIds = tiersToNodeIds; } @@ -118,6 +122,7 @@ public DiscoveryNodes withMasterNodeId(@Nullable String masterNodeId) { minNodeVersion, maxDataNodeCompatibleIndexVersion, minSupportedIndexVersion, + minReadOnlySupportedIndexVersion, tiersToNodeIds ); } @@ -374,6 +379,13 @@ public IndexVersion getMinSupportedIndexVersion() { return minSupportedIndexVersion; } + /** + * Returns the minimum index version for read-only indices supported by all nodes in the cluster + */ + public IndexVersion getMinReadOnlySupportedIndexVersion() { + return minReadOnlySupportedIndexVersion; + } + /** * Return the node-left generation, which is the number of times the cluster membership has been updated by removing one or more nodes. *

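The builder hunk below folds each node's version floor into a cluster-wide one: since every node must be able to open a read-only index, the cluster-wide minimum read-only index version is the most restrictive, i.e. the highest, of the per-node minima. A minimal standalone sketch of that fold (method names as in this patch; the empty-cluster fallback mirrors the Objects.requireNonNullElse default used in build()):

    import org.elasticsearch.cluster.node.DiscoveryNode;
    import org.elasticsearch.index.IndexVersion;
    import org.elasticsearch.index.IndexVersions;

    static IndexVersion minReadOnlySupported(Iterable<DiscoveryNode> nodes) {
        IndexVersion floor = null;
        for (DiscoveryNode node : nodes) {
            IndexVersion candidate = node.getMinReadOnlyIndexVersion();
            // the most restrictive node wins, so take the maximum of the minima
            if (floor == null || candidate.after(floor)) {
                floor = candidate;
            }
        }
        // no nodes yet: fall back to the oldest read-only version this build supports
        return floor == null ? IndexVersions.MINIMUM_READONLY_COMPATIBLE : floor;
    }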
@@ -840,6 +852,7 @@ public DiscoveryNodes build() { Version maxNodeVersion = null; IndexVersion maxDataNodeCompatibleIndexVersion = null; IndexVersion minSupportedIndexVersion = null; + IndexVersion minReadOnlySupportedIndexVersion = null; for (Map.Entry nodeEntry : nodes.entrySet()) { DiscoveryNode discoNode = nodeEntry.getValue(); Version version = discoNode.getVersion(); @@ -849,6 +862,7 @@ public DiscoveryNodes build() { minNodeVersion = min(minNodeVersion, version); maxNodeVersion = max(maxNodeVersion, version); minSupportedIndexVersion = max(minSupportedIndexVersion, discoNode.getMinIndexVersion()); + minReadOnlySupportedIndexVersion = max(minReadOnlySupportedIndexVersion, discoNode.getMinReadOnlyIndexVersion()); } final long newNodeLeftGeneration; @@ -881,6 +895,7 @@ public DiscoveryNodes build() { Objects.requireNonNullElse(minNodeVersion, Version.CURRENT.minimumCompatibilityVersion()), Objects.requireNonNullElse(maxDataNodeCompatibleIndexVersion, IndexVersion.current()), Objects.requireNonNullElse(minSupportedIndexVersion, IndexVersions.MINIMUM_COMPATIBLE), + Objects.requireNonNullElse(minReadOnlySupportedIndexVersion, IndexVersions.MINIMUM_READONLY_COMPATIBLE), computeTiersToNodesMap(dataNodes) ); } diff --git a/server/src/main/java/org/elasticsearch/cluster/node/VersionInformation.java b/server/src/main/java/org/elasticsearch/cluster/node/VersionInformation.java index a4d0ff1eb55e4..852f31db69c92 100644 --- a/server/src/main/java/org/elasticsearch/cluster/node/VersionInformation.java +++ b/server/src/main/java/org/elasticsearch/cluster/node/VersionInformation.java @@ -18,20 +18,23 @@ /** * Represents the versions of various aspects of an Elasticsearch node. - * @param buildVersion The node {@link BuildVersion} - * @param minIndexVersion The minimum {@link IndexVersion} supported by this node - * @param maxIndexVersion The maximum {@link IndexVersion} supported by this node + * @param buildVersion The node {@link BuildVersion} + * @param minIndexVersion The minimum {@link IndexVersion} supported by this node + * @param minReadOnlyIndexVersion The minimum {@link IndexVersion} for read-only indices supported by this node + * @param maxIndexVersion The maximum {@link IndexVersion} supported by this node */ public record VersionInformation( BuildVersion buildVersion, Version nodeVersion, IndexVersion minIndexVersion, + IndexVersion minReadOnlyIndexVersion, IndexVersion maxIndexVersion ) { public static final VersionInformation CURRENT = new VersionInformation( BuildVersion.current(), IndexVersions.MINIMUM_COMPATIBLE, + IndexVersions.MINIMUM_READONLY_COMPATIBLE, IndexVersion.current() ); @@ -39,11 +42,18 @@ public record VersionInformation( Objects.requireNonNull(buildVersion); Objects.requireNonNull(nodeVersion); Objects.requireNonNull(minIndexVersion); + Objects.requireNonNull(minReadOnlyIndexVersion); Objects.requireNonNull(maxIndexVersion); + assert minReadOnlyIndexVersion.onOrBefore(minIndexVersion) : minReadOnlyIndexVersion + " > " + minIndexVersion; } - public VersionInformation(BuildVersion version, IndexVersion minIndexVersion, IndexVersion maxIndexVersion) { - this(version, Version.CURRENT, minIndexVersion, maxIndexVersion); + public VersionInformation( + BuildVersion version, + IndexVersion minIndexVersion, + IndexVersion minReadOnlyIndexVersion, + IndexVersion maxIndexVersion + ) { + this(version, Version.CURRENT, minIndexVersion, minReadOnlyIndexVersion, maxIndexVersion); /* * Whilst DiscoveryNode.getVersion exists, we need to be able to get a Version from VersionInfo * 
This needs to be consistent - on serverless, BuildVersion has an id of -1, which translates @@ -57,7 +67,17 @@ public VersionInformation(BuildVersion version, IndexVersion minIndexVersion, In @Deprecated public VersionInformation(Version version, IndexVersion minIndexVersion, IndexVersion maxIndexVersion) { - this(BuildVersion.fromVersionId(version.id()), version, minIndexVersion, maxIndexVersion); + this(version, minIndexVersion, minIndexVersion, maxIndexVersion); + } + + @Deprecated + public VersionInformation( + Version version, + IndexVersion minIndexVersion, + IndexVersion minReadOnlyIndexVersion, + IndexVersion maxIndexVersion + ) { + this(BuildVersion.fromVersionId(version.id()), version, minIndexVersion, minReadOnlyIndexVersion, maxIndexVersion); } @Deprecated diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java index a9e13b86a5159..6841cb5bead0a 100644 --- a/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java +++ b/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java @@ -24,6 +24,8 @@ import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Tuple; import org.elasticsearch.http.HttpTransportSettings; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; import org.elasticsearch.tasks.Task; import org.elasticsearch.telemetry.tracing.TraceContext; @@ -530,6 +532,17 @@ public String getHeader(String key) { return value; } + /** + * Returns the header for the given key or defaultValue if not present + */ + public String getHeaderOrDefault(String key, String defaultValue) { + String value = getHeader(key); + if (value == null) { + return defaultValue; + } + return value; + } + /** * Returns all of the request headers from the thread's context.
* Be advised, headers might contain credentials. @@ -589,6 +602,14 @@ public void putHeader(Map header) { threadLocal.set(threadLocal.get().putHeaders(header)); } + public void setErrorTraceTransportHeader(RestRequest r) { + // set whether data nodes should send back stack trace based on the `error_trace` query parameter + if (r.paramAsBoolean("error_trace", RestController.ERROR_TRACE_DEFAULT)) { + // We only set it if error_trace is true (defaults to false) to avoid sending useless bytes + putHeader("error_trace", "true"); + } + } + /** * Puts a transient header object into this context */ diff --git a/server/src/main/java/org/elasticsearch/discovery/HandshakingTransportAddressConnector.java b/server/src/main/java/org/elasticsearch/discovery/HandshakingTransportAddressConnector.java index ce849c26ab780..98715127351aa 100644 --- a/server/src/main/java/org/elasticsearch/discovery/HandshakingTransportAddressConnector.java +++ b/server/src/main/java/org/elasticsearch/discovery/HandshakingTransportAddressConnector.java @@ -110,6 +110,7 @@ private void openProbeConnection(ActionListener listener) new VersionInformation( Version.CURRENT.minimumCompatibilityVersion(), IndexVersions.MINIMUM_COMPATIBLE, + IndexVersions.MINIMUM_READONLY_COMPATIBLE, IndexVersion.current() ) ), diff --git a/server/src/main/java/org/elasticsearch/index/IndexSettings.java b/server/src/main/java/org/elasticsearch/index/IndexSettings.java index b15828c5594ae..9273888b9ec91 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexSettings.java +++ b/server/src/main/java/org/elasticsearch/index/IndexSettings.java @@ -685,29 +685,11 @@ public void validate(Boolean enabled, Map, Object> settings) { ); } } - - // Verify that all nodes can handle this setting - var version = (IndexVersion) settings.get(SETTING_INDEX_VERSION_CREATED); - if (version.before(IndexVersions.USE_SYNTHETIC_SOURCE_FOR_RECOVERY) - && version.between( - IndexVersions.USE_SYNTHETIC_SOURCE_FOR_RECOVERY_BACKPORT, - IndexVersions.UPGRADE_TO_LUCENE_10_0_0 - ) == false) { - throw new IllegalArgumentException( - String.format( - Locale.ROOT, - "The setting [%s] is unavailable on this cluster because some nodes are running older " - + "versions that do not support it. Please upgrade all nodes to the latest version " - + "and try again.", - RECOVERY_USE_SYNTHETIC_SOURCE_SETTING.getKey() - ) - ); - } } @Override public Iterator> settings() { - List> res = List.of(INDEX_MAPPER_SOURCE_MODE_SETTING, SETTING_INDEX_VERSION_CREATED, MODE); + List> res = List.of(INDEX_MAPPER_SOURCE_MODE_SETTING, MODE); return res.iterator(); } }, @@ -1050,6 +1032,24 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti indexMappingSourceMode = scopedSettings.get(INDEX_MAPPER_SOURCE_MODE_SETTING); recoverySourceEnabled = RecoverySettings.INDICES_RECOVERY_SOURCE_ENABLED_SETTING.get(nodeSettings); recoverySourceSyntheticEnabled = scopedSettings.get(RECOVERY_USE_SYNTHETIC_SOURCE_SETTING); + if (recoverySourceSyntheticEnabled) { + // Verify that all nodes can handle this setting + if (version.before(IndexVersions.USE_SYNTHETIC_SOURCE_FOR_RECOVERY) + && version.between( + IndexVersions.USE_SYNTHETIC_SOURCE_FOR_RECOVERY_BACKPORT, + IndexVersions.UPGRADE_TO_LUCENE_10_0_0 + ) == false) { + throw new IllegalArgumentException( + String.format( + Locale.ROOT, + "The setting [%s] is unavailable on this cluster because some nodes are running older " + + "versions that do not support it. 
Please upgrade all nodes to the latest version " + + "and try again.", + RECOVERY_USE_SYNTHETIC_SOURCE_SETTING.getKey() + ) + ); + } + } scopedSettings.addSettingsUpdateConsumer( MergePolicyConfig.INDEX_COMPOUND_FORMAT_SETTING, diff --git a/server/src/main/java/org/elasticsearch/index/IndexVersions.java b/server/src/main/java/org/elasticsearch/index/IndexVersions.java index fd321f6256194..8af10524813cc 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexVersions.java +++ b/server/src/main/java/org/elasticsearch/index/IndexVersions.java @@ -12,7 +12,6 @@ import org.apache.lucene.util.Version; import org.elasticsearch.ReleaseVersions; import org.elasticsearch.core.Assertions; -import org.elasticsearch.core.UpdateForV9; import java.lang.reflect.Field; import java.text.ParseException; @@ -25,6 +24,7 @@ import java.util.TreeMap; import java.util.TreeSet; import java.util.function.IntFunction; +import java.util.stream.Collectors; @SuppressWarnings("deprecation") public class IndexVersions { @@ -58,7 +58,6 @@ private static Version parseUnchecked(String version) { } } - @UpdateForV9(owner = UpdateForV9.Owner.SEARCH_FOUNDATIONS) // remove the index versions with which v9 will not need to interact public static final IndexVersion ZERO = def(0, Version.LATEST); public static final IndexVersion V_7_0_0 = def(7_00_00_99, parseUnchecked("8.0.0")); @@ -244,10 +243,12 @@ static NavigableMap getAllVersionIds(Class cls) { return Collections.unmodifiableNavigableMap(builder); } - @UpdateForV9(owner = UpdateForV9.Owner.CORE_INFRA) - // We can simplify this once we've removed all references to index versions earlier than MINIMUM_COMPATIBLE + static Collection getAllWriteVersions() { + return VERSION_IDS.values().stream().filter(v -> v.onOrAfter(IndexVersions.MINIMUM_COMPATIBLE)).collect(Collectors.toSet()); + } + static Collection getAllVersions() { - return VERSION_IDS.values().stream().filter(v -> v.onOrAfter(MINIMUM_COMPATIBLE)).toList(); + return VERSION_IDS.values(); } static final IntFunction VERSION_LOOKUP = ReleaseVersions.generateVersionsLookup(IndexVersions.class, LATEST_DEFINED.id()); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperFeatures.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperFeatures.java index 276d3e151361c..5dbaf0e0f40ad 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MapperFeatures.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperFeatures.java @@ -77,7 +77,8 @@ public Set getTestFeatures() { DocumentParser.FIX_PARSING_SUBOBJECTS_FALSE_DYNAMIC_FALSE, CONSTANT_KEYWORD_SYNTHETIC_SOURCE_WRITE_FIX, META_FETCH_FIELDS_ERROR_CODE_CHANGED, - SPARSE_VECTOR_STORE_SUPPORT + SPARSE_VECTOR_STORE_SUPPORT, + SourceFieldMapper.SYNTHETIC_RECOVERY_SOURCE ); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java index 85f4217811a84..5f1ba6f0ab2a1 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java @@ -56,6 +56,7 @@ public class SourceFieldMapper extends MetadataFieldMapper { "mapper.source.remove_synthetic_source_only_validation" ); public static final NodeFeature SOURCE_MODE_FROM_INDEX_SETTING = new NodeFeature("mapper.source.mode_from_index_setting"); + public static final NodeFeature SYNTHETIC_RECOVERY_SOURCE = new NodeFeature("mapper.synthetic_recovery_source"); public static final 
String NAME = "_source"; public static final String RECOVERY_SOURCE_NAME = "_recovery_source"; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldMapper.java index 93a2157b2338a..de2632165b0cc 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldMapper.java @@ -52,6 +52,7 @@ import org.elasticsearch.index.mapper.DocumentParserContext; import org.elasticsearch.index.mapper.DynamicFieldType; import org.elasticsearch.index.mapper.FieldMapper; +import org.elasticsearch.index.mapper.KeywordFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperBuilderContext; @@ -670,7 +671,7 @@ public static final class RootFlattenedFieldType extends StringFieldType impleme private final boolean isDimension; private final int ignoreAbove; - public RootFlattenedFieldType( + RootFlattenedFieldType( String name, boolean indexed, boolean hasDocValues, @@ -682,7 +683,7 @@ public RootFlattenedFieldType( this(name, indexed, hasDocValues, meta, splitQueriesOnWhitespace, eagerGlobalOrdinals, Collections.emptyList(), ignoreAbove); } - public RootFlattenedFieldType( + RootFlattenedFieldType( String name, boolean indexed, boolean hasDocValues, @@ -806,6 +807,10 @@ public MappedFieldType getChildFieldType(String childPath) { return new KeyedFlattenedFieldType(name(), childPath, this); } + public MappedFieldType getKeyedFieldType() { + return new KeywordFieldMapper.KeywordFieldType(name() + KEYED_FIELD_SUFFIX); + } + @Override public boolean isDimension() { return isDimension; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldSyntheticWriterHelper.java b/server/src/main/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldSyntheticWriterHelper.java index 950fef95772fb..53f68fb6edeef 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldSyntheticWriterHelper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldSyntheticWriterHelper.java @@ -55,7 +55,7 @@ * }` * */ -class FlattenedFieldSyntheticWriterHelper { +public class FlattenedFieldSyntheticWriterHelper { private record Prefix(List prefix) { @@ -225,17 +225,17 @@ public boolean equals(Object obj) { } } - interface SortedKeyedValues { + public interface SortedKeyedValues { BytesRef next() throws IOException; } private final SortedKeyedValues sortedKeyedValues; - FlattenedFieldSyntheticWriterHelper(final SortedKeyedValues sortedKeyedValues) { + public FlattenedFieldSyntheticWriterHelper(final SortedKeyedValues sortedKeyedValues) { this.sortedKeyedValues = sortedKeyedValues; } - void write(final XContentBuilder b) throws IOException { + public void write(final XContentBuilder b) throws IOException { KeyValue curr = new KeyValue(sortedKeyedValues.next()); KeyValue prev = KeyValue.EMPTY; final List values = new ArrayList<>(); diff --git a/server/src/main/java/org/elasticsearch/index/query/functionscore/RandomScoreFunctionBuilder.java b/server/src/main/java/org/elasticsearch/index/query/functionscore/RandomScoreFunctionBuilder.java index 6d4b2dd4ab1f5..88f1ab1ba5c2e 100644 --- a/server/src/main/java/org/elasticsearch/index/query/functionscore/RandomScoreFunctionBuilder.java +++ 
b/server/src/main/java/org/elasticsearch/index/query/functionscore/RandomScoreFunctionBuilder.java @@ -13,12 +13,10 @@ import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.logging.DeprecationCategory; -import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.lucene.search.function.RandomScoreFunction; import org.elasticsearch.common.lucene.search.function.ScoreFunction; -import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.mapper.SeqNoFieldMapper; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; @@ -30,7 +28,6 @@ * A function that computes a random score for the matched documents */ public class RandomScoreFunctionBuilder extends ScoreFunctionBuilder { - private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RandomScoreFunctionBuilder.class); public static final String NAME = "random_score"; private String field; @@ -140,17 +137,7 @@ protected ScoreFunction doToFunction(SearchExecutionContext context) { // DocID-based random score generation return new RandomScoreFunction(hash(context.nowInMillis()), salt, null); } else { - String fieldName; - if (field == null) { - deprecationLogger.warn( - DeprecationCategory.QUERIES, - "seed_requires_field", - "As of version 7.0 Elasticsearch will require that a [field] parameter is provided when a [seed] is set" - ); - fieldName = IdFieldMapper.NAME; - } else { - fieldName = field; - } + final String fieldName = Objects.requireNonNullElse(field, SeqNoFieldMapper.NAME); if (context.isFieldMapped(fieldName) == false) { if (context.hasMappings() == false) { // no mappings: the index is empty anyway diff --git a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java index 17e56a392daff..5cfe1c104d45e 100644 --- a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java +++ b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java @@ -83,7 +83,7 @@ import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; -import org.elasticsearch.core.UpdateForV9; +import org.elasticsearch.core.UpdateForV10; import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; @@ -553,9 +553,10 @@ private SettingsModule validateSettings(Settings envSettings, Settings settings, return settingsModule; } - @UpdateForV9(owner = UpdateForV9.Owner.SEARCH_FOUNDATIONS) + @UpdateForV10(owner = UpdateForV10.Owner.SEARCH_FOUNDATIONS) private static void addBwcSearchWorkerSettings(List> additionalSettings) { - // TODO remove the below settings, they are unused and only here to enable BwC for deployments that still use them + // Search workers thread pool has been removed in Elasticsearch 8.16.0. These settings are deprecated and take no effect. 
+ // They are here only to enable BwC for deployments that still use them additionalSettings.add( Setting.intSetting("thread_pool.search_worker.queue_size", 0, Setting.Property.NodeScope, Setting.Property.DeprecatedWarning) ); diff --git a/server/src/main/java/org/elasticsearch/rest/BaseRestHandler.java b/server/src/main/java/org/elasticsearch/rest/BaseRestHandler.java index 4564a37dacf4a..509086b982319 100644 --- a/server/src/main/java/org/elasticsearch/rest/BaseRestHandler.java +++ b/server/src/main/java/org/elasticsearch/rest/BaseRestHandler.java @@ -269,5 +269,4 @@ protected Set responseParams() { protected Set responseParams(RestApiVersion restApiVersion) { return responseParams(); } - } diff --git a/server/src/main/java/org/elasticsearch/rest/RestController.java b/server/src/main/java/org/elasticsearch/rest/RestController.java index 49fe794bbe615..49801499ea991 100644 --- a/server/src/main/java/org/elasticsearch/rest/RestController.java +++ b/server/src/main/java/org/elasticsearch/rest/RestController.java @@ -93,6 +93,7 @@ public class RestController implements HttpServerTransport.Dispatcher { public static final String STATUS_CODE_KEY = "es_rest_status_code"; public static final String HANDLER_NAME_KEY = "es_rest_handler_name"; public static final String REQUEST_METHOD_KEY = "es_rest_request_method"; + public static final boolean ERROR_TRACE_DEFAULT = false; static { try (InputStream stream = RestController.class.getResourceAsStream("/config/favicon.ico")) { @@ -638,7 +639,7 @@ private void tryAllHandlers(final RestRequest request, final RestChannel channel private static void validateErrorTrace(RestRequest request, RestChannel channel) { // error_trace cannot be used when we disable detailed errors // we consume the error_trace parameter first to ensure that it is always consumed - if (request.paramAsBoolean("error_trace", false) && channel.detailedErrorsEnabled() == false) { + if (request.paramAsBoolean("error_trace", ERROR_TRACE_DEFAULT) && channel.detailedErrorsEnabled() == false) { throw new IllegalArgumentException("error traces in responses are disabled."); } } diff --git a/server/src/main/java/org/elasticsearch/rest/RestResponse.java b/server/src/main/java/org/elasticsearch/rest/RestResponse.java index d043974055667..0c359e0a4a053 100644 --- a/server/src/main/java/org/elasticsearch/rest/RestResponse.java +++ b/server/src/main/java/org/elasticsearch/rest/RestResponse.java @@ -37,6 +37,7 @@ import static java.util.Collections.singletonMap; import static org.elasticsearch.ElasticsearchException.REST_EXCEPTION_SKIP_STACK_TRACE; import static org.elasticsearch.rest.RestController.ELASTIC_PRODUCT_HTTP_HEADER; +import static org.elasticsearch.rest.RestController.ERROR_TRACE_DEFAULT; public final class RestResponse implements Releasable { @@ -143,7 +144,7 @@ public RestResponse(RestChannel channel, RestStatus status, Exception e) throws // switched in the xcontent rendering parameters. 
// For authorization problems (RestStatus.UNAUTHORIZED) we don't want to do this since this could // leak information to the caller who is unauthorized to make this call - if (params.paramAsBoolean("error_trace", false) && status != RestStatus.UNAUTHORIZED) { + if (params.paramAsBoolean("error_trace", ERROR_TRACE_DEFAULT) && status != RestStatus.UNAUTHORIZED) { params = new ToXContent.DelegatingMapParams(singletonMap(REST_EXCEPTION_SKIP_STACK_TRACE, "false"), params); } diff --git a/server/src/main/java/org/elasticsearch/rest/RestUtils.java b/server/src/main/java/org/elasticsearch/rest/RestUtils.java index df51b57f0859c..bbca086e345f7 100644 --- a/server/src/main/java/org/elasticsearch/rest/RestUtils.java +++ b/server/src/main/java/org/elasticsearch/rest/RestUtils.java @@ -15,6 +15,7 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; +import java.net.URI; import java.nio.charset.Charset; import java.nio.charset.StandardCharsets; import java.util.Arrays; @@ -35,6 +36,13 @@ public class RestUtils { public static final UnaryOperator REST_DECODER = RestUtils::decodeComponent; + public static void decodeQueryString(URI uri, Map params) { + final var rawQuery = uri.getRawQuery(); + if (Strings.hasLength(rawQuery)) { + decodeQueryString(rawQuery, 0, params); + } + } + public static void decodeQueryString(String s, int fromIndex, Map params) { if (fromIndex < 0) { return; diff --git a/server/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java b/server/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java index 24fab92ced392..87b1a6b9c2fa8 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java @@ -72,6 +72,9 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { + if (client.threadPool() != null && client.threadPool().getThreadContext() != null) { + client.threadPool().getThreadContext().setErrorTraceTransportHeader(request); + } final MultiSearchRequest multiSearchRequest = parseRequest(request, allowExplicitIndex, searchUsageHolder, clusterSupportsFeature); return channel -> { final RestCancellableNodeClient cancellableClient = new RestCancellableNodeClient(client, request.getHttpChannel()); diff --git a/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java b/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java index a9c2ff7576b05..99c11bb60b8f0 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java @@ -95,7 +95,9 @@ public Set supportedCapabilities() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { - + if (client.threadPool() != null && client.threadPool().getThreadContext() != null) { + client.threadPool().getThreadContext().setErrorTraceTransportHeader(request); + } SearchRequest searchRequest = new SearchRequest(); // access the BwC param, but just drop it // this might be set by old clients diff --git a/server/src/main/java/org/elasticsearch/search/SearchService.java b/server/src/main/java/org/elasticsearch/search/SearchService.java index b9bd398500c71..4557ccb3d2220 100644 --- 
a/server/src/main/java/org/elasticsearch/search/SearchService.java +++ b/server/src/main/java/org/elasticsearch/search/SearchService.java @@ -17,6 +17,8 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.TopDocs; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.ResolvedIndices; @@ -152,6 +154,7 @@ import java.util.function.LongSupplier; import java.util.function.Supplier; +import static org.elasticsearch.TransportVersions.ERROR_TRACE_IN_TRANSPORT_HEADER; import static org.elasticsearch.core.TimeValue.timeValueHours; import static org.elasticsearch.core.TimeValue.timeValueMillis; import static org.elasticsearch.core.TimeValue.timeValueMinutes; @@ -272,6 +275,7 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv public static final int DEFAULT_SIZE = 10; public static final int DEFAULT_FROM = 0; + private static final StackTraceElement[] EMPTY_STACK_TRACE_ARRAY = new StackTraceElement[0]; private final ThreadPool threadPool; @@ -506,7 +510,41 @@ protected void doClose() { keepAliveReaper.cancel(); } + /** + * Wraps the listener to avoid sending stack traces back to the coordinating + * node if the `error_trace` header is set to {@code false}. Upon reading we + * default to {@code true} to maintain the same behavior as before the change, + * due to older nodes not being able to specify whether they need stack traces. + * + * @param <T> the type of the response + * @param listener the action listener to be wrapped + * @param version channel version of the request + * @param threadPool with context where to write the new header + * @return the wrapped action listener + */ + static <T> ActionListener<T> maybeWrapListenerForStackTrace( + ActionListener<T> listener, + TransportVersion version, + ThreadPool threadPool + ) { + boolean header = true; + if (version.onOrAfter(ERROR_TRACE_IN_TRANSPORT_HEADER) && threadPool.getThreadContext() != null) { + header = Boolean.parseBoolean(threadPool.getThreadContext().getHeaderOrDefault("error_trace", "false")); + } + if (header == false) { + return listener.delegateResponse((l, e) -> { + ExceptionsHelper.unwrapCausesAndSuppressed(e, err -> { + err.setStackTrace(EMPTY_STACK_TRACE_ARRAY); + return false; + }); + l.onFailure(e); + }); + } + return listener; + } + public void executeDfsPhase(ShardSearchRequest request, SearchShardTask task, ActionListener<SearchPhaseResult> listener) { + listener = maybeWrapListenerForStackTrace(listener, request.getChannelVersion(), threadPool); final IndexShard shard = getShard(request); rewriteAndFetchShardRequest(shard, request, listener.delegateFailure((l, rewritten) -> { // fork the execution in the search thread pool @@ -544,10 +582,11 @@ private void loadOrExecuteQueryPhase(final ShardSearchRequest request, final Sea } public void executeQueryPhase(ShardSearchRequest request, SearchShardTask task, ActionListener<SearchPhaseResult> listener) { + ActionListener<SearchPhaseResult> finalListener = maybeWrapListenerForStackTrace(listener, request.getChannelVersion(), threadPool); assert request.canReturnNullResponseIfMatchNoDocs() == false || request.numberOfShards() > 1 : "empty responses require more than one shard"; final IndexShard shard = getShard(request); - rewriteAndFetchShardRequest(shard, request, listener.delegateFailure((l, orig) -> { + rewriteAndFetchShardRequest(shard, request, finalListener.delegateFailure((l, orig)
-> { // check if we can shortcut the query phase entirely. if (orig.canReturnNullResponseIfMatchNoDocs()) { assert orig.scroll() == null; @@ -561,7 +600,7 @@ public void executeQueryPhase(ShardSearchRequest request, SearchShardTask task, ); CanMatchShardResponse canMatchResp = canMatch(canMatchContext, false); if (canMatchResp.canMatch() == false) { - listener.onResponse(QuerySearchResult.nullInstance()); + finalListener.onResponse(QuerySearchResult.nullInstance()); return; } } @@ -736,6 +775,7 @@ private SearchPhaseResult executeQueryPhase(ShardSearchRequest request, SearchSh } public void executeRankFeaturePhase(RankFeatureShardRequest request, SearchShardTask task, ActionListener<RankFeatureResult> listener) { + listener = maybeWrapListenerForStackTrace(listener, request.getShardSearchRequest().getChannelVersion(), threadPool); final ReaderContext readerContext = findReaderContext(request.contextId(), request); final ShardSearchRequest shardSearchRequest = readerContext.getShardSearchRequest(request.getShardSearchRequest()); final Releasable markAsUsed = readerContext.markAsUsed(getKeepAlive(shardSearchRequest)); @@ -779,8 +819,10 @@ private QueryFetchSearchResult executeFetchPhase(ReaderContext reader, SearchCon public void executeQueryPhase( InternalScrollSearchRequest request, SearchShardTask task, - ActionListener<ScrollQuerySearchResult> listener + ActionListener<ScrollQuerySearchResult> listener, + TransportVersion version ) { + listener = maybeWrapListenerForStackTrace(listener, version, threadPool); final LegacyReaderContext readerContext = (LegacyReaderContext) findReaderContext(request.contextId(), request); final Releasable markAsUsed; try { @@ -816,7 +858,13 @@ public void executeQueryPhase( * It is the responsibility of the caller to ensure that the ref count is correctly decremented * when the object is no longer needed.
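+ * <p>(Illustrative sketch; the transport wiring is assumed rather than shown in this hunk: handlers forward the channel's version so the listener can be wrapped, e.g. {@code searchService.executeQueryPhase(request, task, new ChannelActionListener<>(channel), channel.getVersion())}.)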
*/ - public void executeQueryPhase(QuerySearchRequest request, SearchShardTask task, ActionListener listener) { + public void executeQueryPhase( + QuerySearchRequest request, + SearchShardTask task, + ActionListener listener, + TransportVersion version + ) { + listener = maybeWrapListenerForStackTrace(listener, version, threadPool); final ReaderContext readerContext = findReaderContext(request.contextId(), request.shardSearchRequest()); final ShardSearchRequest shardSearchRequest = readerContext.getShardSearchRequest(request.shardSearchRequest()); final Releasable markAsUsed = readerContext.markAsUsed(getKeepAlive(shardSearchRequest)); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java index a8ccd1c76d031..4d0f58756b11c 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java @@ -11,7 +11,6 @@ import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.common.Rounding; -import org.elasticsearch.common.logging.DeprecationCategory; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.aggregations.Aggregator; @@ -42,49 +41,6 @@ public static void registerAggregators(ValuesSourceRegistry.Builder builder) { ); builder.register(DateHistogramAggregationBuilder.REGISTRY_KEY, CoreValuesSourceType.RANGE, DateRangeHistogramAggregator::new, true); - - builder.register( - DateHistogramAggregationBuilder.REGISTRY_KEY, - CoreValuesSourceType.BOOLEAN, - ( - name, - factories, - rounding, - order, - keyed, - minDocCount, - downsampledResultsOffset, - extendedBounds, - hardBounds, - valuesSourceConfig, - context, - parent, - cardinality, - metadata) -> { - DEPRECATION_LOGGER.warn( - DeprecationCategory.AGGREGATIONS, - "date-histogram-boolean", - "Running DateHistogram aggregations on [boolean] fields is deprecated" - ); - return DateHistogramAggregator.build( - name, - factories, - rounding, - order, - keyed, - minDocCount, - downsampledResultsOffset, - extendedBounds, - hardBounds, - valuesSourceConfig, - context, - parent, - cardinality, - metadata - ); - }, - true - ); } private final DateHistogramAggregationSupplier aggregatorSupplier; diff --git a/server/src/main/java/org/elasticsearch/transport/ConnectTransportException.java b/server/src/main/java/org/elasticsearch/transport/ConnectTransportException.java index 648d27c885843..302175cc4f5a0 100644 --- a/server/src/main/java/org/elasticsearch/transport/ConnectTransportException.java +++ b/server/src/main/java/org/elasticsearch/transport/ConnectTransportException.java @@ -13,6 +13,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.rest.RestStatus; import java.io.IOException; @@ -41,6 +42,18 @@ public ConnectTransportException(StreamInput in) throws IOException { } } + /** + * The ES REST API is a gateway to a single or multiple clusters. If there is an error connecting to other servers, then we should + * return a 502 BAD_GATEWAY status code instead of the parent class' 500 INTERNAL_SERVER_ERROR. 
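+ * (For illustration, matching the serialization test below: {@code new ConnectTransportException(node, "connect failed").status()} yields {@code RestStatus.BAD_GATEWAY}.)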
Clients tend to retry on a 502 but not + * on a 500, and retrying may help on a connection error. + * + * @return a {@link RestStatus#BAD_GATEWAY} code + */ + @Override + public final RestStatus status() { + return RestStatus.BAD_GATEWAY; + } + @Override protected void writeTo(StreamOutput out, Writer nestedExceptionsWriter) throws IOException { super.writeTo(out, nestedExceptionsWriter); diff --git a/server/src/main/java/org/elasticsearch/transport/ProxyConnectionStrategy.java b/server/src/main/java/org/elasticsearch/transport/ProxyConnectionStrategy.java index d5047a61e4606..eb2eab75d3fe3 100644 --- a/server/src/main/java/org/elasticsearch/transport/ProxyConnectionStrategy.java +++ b/server/src/main/java/org/elasticsearch/transport/ProxyConnectionStrategy.java @@ -303,6 +303,7 @@ public void onFailure(Exception e) { new VersionInformation( Version.CURRENT.minimumCompatibilityVersion(), IndexVersions.MINIMUM_COMPATIBLE, + IndexVersions.MINIMUM_READONLY_COMPATIBLE, IndexVersion.current() ) ); diff --git a/server/src/main/java/org/elasticsearch/transport/SniffConnectionStrategy.java b/server/src/main/java/org/elasticsearch/transport/SniffConnectionStrategy.java index 2c198caf22354..854072c49e354 100644 --- a/server/src/main/java/org/elasticsearch/transport/SniffConnectionStrategy.java +++ b/server/src/main/java/org/elasticsearch/transport/SniffConnectionStrategy.java @@ -505,6 +505,7 @@ private static DiscoveryNode resolveSeedNode(String clusterAlias, String address var seedVersion = new VersionInformation( Version.CURRENT.minimumCompatibilityVersion(), IndexVersions.MINIMUM_COMPATIBLE, + IndexVersions.MINIMUM_READONLY_COMPATIBLE, IndexVersion.current() ); if (proxyAddress == null || proxyAddress.isEmpty()) { diff --git a/server/src/main/resources/org/elasticsearch/TransportVersions.csv b/server/src/main/resources/org/elasticsearch/TransportVersions.csv index 08db0822dfef5..2016f59b58a3e 100644 --- a/server/src/main/resources/org/elasticsearch/TransportVersions.csv +++ b/server/src/main/resources/org/elasticsearch/TransportVersions.csv @@ -135,4 +135,5 @@ 8.15.5,8702003 8.16.0,8772001 8.16.1,8772004 +8.16.2,8772004 8.17.0,8797002 diff --git a/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv b/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv index afe696f31d323..3bfeeded6494c 100644 --- a/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv +++ b/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv @@ -135,4 +135,5 @@ 8.15.5,8512000 8.16.0,8518000 8.16.1,8518000 +8.16.2,8518000 8.17.0,8521000 diff --git a/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java b/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java index 2abe4157583cd..31f54f9a16359 100644 --- a/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java +++ b/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java @@ -409,6 +409,7 @@ public void testConnectTransportException() throws IOException { ex = serialize(new ConnectTransportException(node, "msg", "action", new NullPointerException())); assertEquals("[][" + transportAddress + "][action] msg", ex.getMessage()); assertThat(ex.getCause(), instanceOf(NullPointerException.class)); + assertEquals(RestStatus.BAD_GATEWAY, ex.status()); } public void testSearchPhaseExecutionException() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponseTests.java 
b/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponseTests.java index b59cc13a20ff2..69cff0fc45ac3 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponseTests.java @@ -127,6 +127,7 @@ public void testToXContentWithDeprecatedClusterState() { ], "version": "%s", "min_index_version": %s, + "min_read_only_index_version": %s, "max_index_version": %s } }, @@ -218,6 +219,7 @@ public void testToXContentWithDeprecatedClusterState() { clusterState.getNodes().get("node0").getEphemeralId(), Version.CURRENT, IndexVersions.MINIMUM_COMPATIBLE, + IndexVersions.MINIMUM_READONLY_COMPATIBLE, IndexVersion.current(), IndexVersion.current(), IndexVersion.current() diff --git a/server/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java b/server/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java index 668aea70c23f2..5f4426b02ce1a 100644 --- a/server/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java @@ -213,6 +213,7 @@ public void testToXContent() throws IOException { ], "version": "%s", "min_index_version":%s, + "min_read_only_index_version":%s, "max_index_version":%s } }, @@ -389,6 +390,7 @@ public void testToXContent() throws IOException { ephemeralId, Version.CURRENT, IndexVersions.MINIMUM_COMPATIBLE, + IndexVersions.MINIMUM_READONLY_COMPATIBLE, IndexVersion.current(), TransportVersion.current(), IndexVersion.current(), @@ -488,6 +490,7 @@ public void testToXContent_FlatSettingTrue_ReduceMappingFalse() throws IOExcepti ], "version" : "%s", "min_index_version" : %s, + "min_read_only_index_version" : %s, "max_index_version" : %s } }, @@ -663,6 +666,7 @@ public void testToXContent_FlatSettingTrue_ReduceMappingFalse() throws IOExcepti ephemeralId, Version.CURRENT, IndexVersions.MINIMUM_COMPATIBLE, + IndexVersions.MINIMUM_READONLY_COMPATIBLE, IndexVersion.current(), TransportVersion.current(), IndexVersion.current(), @@ -762,6 +766,7 @@ public void testToXContent_FlatSettingFalse_ReduceMappingTrue() throws IOExcepti ], "version" : "%s", "min_index_version" : %s, + "min_read_only_index_version" : %s, "max_index_version" : %s } }, @@ -943,6 +948,7 @@ public void testToXContent_FlatSettingFalse_ReduceMappingTrue() throws IOExcepti ephemeralId, Version.CURRENT, IndexVersions.MINIMUM_COMPATIBLE, + IndexVersions.MINIMUM_READONLY_COMPATIBLE, IndexVersion.current(), TransportVersion.current(), IndexVersion.current(), diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/HumanReadableIndexSettingsTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/HumanReadableIndexSettingsTests.java index 476ade8576586..8752e68112bff 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/HumanReadableIndexSettingsTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/HumanReadableIndexSettingsTests.java @@ -22,7 +22,7 @@ public class HumanReadableIndexSettingsTests extends ESTestCase { public void testHumanReadableSettings() { - IndexVersion versionCreated = randomVersion(random()); + IndexVersion versionCreated = randomVersion(); long created = System.currentTimeMillis(); Settings testSettings = Settings.builder() .put(IndexMetadata.SETTING_VERSION_CREATED, versionCreated) diff --git 
a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexServiceTests.java index c0e397c9fb9c9..d45b2c119d2ae 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexServiceTests.java @@ -369,11 +369,7 @@ public void testValidateSplitIndex() { } public void testPrepareResizeIndexSettings() { - final List versions = Stream.of(IndexVersionUtils.randomVersion(random()), IndexVersionUtils.randomVersion(random())) - .sorted() - .toList(); - final IndexVersion version = versions.get(0); - final IndexVersion upgraded = versions.get(1); + final IndexVersion version = IndexVersionUtils.randomWriteVersion(); final Settings.Builder indexSettingsBuilder = Settings.builder() .put("index.version.created", version) .put("index.similarity.default.type", "BM25") diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataDeleteIndexServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataDeleteIndexServiceTests.java index 0354b6f0bcea8..3ada92dbe7ae5 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataDeleteIndexServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataDeleteIndexServiceTests.java @@ -97,7 +97,7 @@ public void testDeleteSnapshotting() { Map.of(), null, SnapshotInfoTestUtils.randomUserMetadata(), - IndexVersionUtils.randomVersion(random()) + IndexVersionUtils.randomVersion() ) ); ClusterState state = ClusterState.builder(clusterState(index)).putCustom(SnapshotsInProgress.TYPE, snaps).build(); @@ -153,7 +153,7 @@ public void testDeleteIndexWithAnAlias() { String alias = randomAlphaOfLength(5); IndexMetadata idxMetadata = IndexMetadata.builder(index) - .settings(Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersionUtils.randomVersion(random()))) + .settings(Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersionUtils.randomVersion())) .putAlias(AliasMetadata.builder(alias).writeIndex(true).build()) .numberOfShards(1) .numberOfReplicas(1) @@ -348,7 +348,7 @@ public void testDeleteCurrentWriteFailureIndexForDataStream() { private ClusterState clusterState(String index) { IndexMetadata indexMetadata = IndexMetadata.builder(index) - .settings(Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersionUtils.randomVersion(random()))) + .settings(Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersionUtils.randomVersion())) .numberOfShards(1) .numberOfReplicas(1) .build(); diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexAliasesServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexAliasesServiceTests.java index 4f2c84d76b5a4..22ddb5cc2ba35 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexAliasesServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexAliasesServiceTests.java @@ -744,7 +744,7 @@ private ClusterState applyHiddenAliasMix(ClusterState before, Boolean isHidden1, private ClusterState createIndex(ClusterState state, String index) { IndexMetadata indexMetadata = IndexMetadata.builder(index) - .settings(Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersionUtils.randomVersion(random()))) + 
.settings(Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersionUtils.randomWriteVersion())) .numberOfShards(1) .numberOfReplicas(1) .build(); diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexStateServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexStateServiceTests.java index db5a98a4878ca..bd11e636d51c1 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexStateServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexStateServiceTests.java @@ -377,7 +377,7 @@ private static ClusterState addSnapshotIndex(final String index, final int numSh shardsBuilder, null, SnapshotInfoTestUtils.randomUserMetadata(), - IndexVersionUtils.randomVersion(random()) + IndexVersionUtils.randomVersion() ); return ClusterState.builder(newState).putCustom(SnapshotsInProgress.TYPE, SnapshotsInProgress.EMPTY.withAddedEntry(entry)).build(); } diff --git a/server/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeTests.java b/server/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeTests.java index 331b5d92ca94e..fa7633f0eaf75 100644 --- a/server/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeTests.java @@ -31,6 +31,8 @@ import static java.util.Collections.emptySet; import static org.elasticsearch.test.NodeRoles.nonRemoteClusterClientNode; import static org.elasticsearch.test.NodeRoles.remoteClusterClientNode; +import static org.elasticsearch.test.TransportVersionUtils.getPreviousVersion; +import static org.elasticsearch.test.TransportVersionUtils.randomVersionBetween; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -221,6 +223,7 @@ public void testDiscoveryNodeToXContent() { ], "version" : "%s", "min_index_version" : %s, + "min_read_only_index_version" : %s, "max_index_version" : %s } }""", @@ -228,6 +231,7 @@ public void testDiscoveryNodeToXContent() { withExternalId ? 
"test-external-id" : "test-name", Version.CURRENT, IndexVersions.MINIMUM_COMPATIBLE, + IndexVersions.MINIMUM_READONLY_COMPATIBLE, IndexVersion.current() ) ) @@ -250,4 +254,61 @@ public void testDiscoveryNodeToString() { assertThat(toString, containsString("{" + node.getBuildVersion() + "}")); assertThat(toString, containsString("{test-attr=val}"));// attributes } + + public void testDiscoveryNodeMinReadOnlyVersionSerialization() throws Exception { + var node = DiscoveryNodeUtils.create("_id", buildNewFakeTransportAddress(), VersionInformation.CURRENT); + + { + try (var out = new BytesStreamOutput()) { + out.setTransportVersion(TransportVersion.current()); + node.writeTo(out); + + try (var in = StreamInput.wrap(out.bytes().array())) { + in.setTransportVersion(TransportVersion.current()); + + var deserialized = new DiscoveryNode(in); + assertThat(deserialized.getId(), equalTo(node.getId())); + assertThat(deserialized.getAddress(), equalTo(node.getAddress())); + assertThat(deserialized.getMinIndexVersion(), equalTo(node.getMinIndexVersion())); + assertThat(deserialized.getMaxIndexVersion(), equalTo(node.getMaxIndexVersion())); + assertThat(deserialized.getMinReadOnlyIndexVersion(), equalTo(node.getMinReadOnlyIndexVersion())); + assertThat(deserialized.getVersionInformation(), equalTo(node.getVersionInformation())); + } + } + } + + { + var oldVersion = randomVersionBetween( + random(), + TransportVersions.MINIMUM_COMPATIBLE, + getPreviousVersion(TransportVersions.NODE_VERSION_INFORMATION_WITH_MIN_READ_ONLY_INDEX_VERSION) + ); + try (var out = new BytesStreamOutput()) { + out.setTransportVersion(oldVersion); + node.writeTo(out); + + try (var in = StreamInput.wrap(out.bytes().array())) { + in.setTransportVersion(oldVersion); + + var deserialized = new DiscoveryNode(in); + assertThat(deserialized.getId(), equalTo(node.getId())); + assertThat(deserialized.getAddress(), equalTo(node.getAddress())); + assertThat(deserialized.getMinIndexVersion(), equalTo(node.getMinIndexVersion())); + assertThat(deserialized.getMaxIndexVersion(), equalTo(node.getMaxIndexVersion())); + assertThat(deserialized.getMinReadOnlyIndexVersion(), equalTo(node.getMinIndexVersion())); + assertThat( + deserialized.getVersionInformation(), + equalTo( + new VersionInformation( + node.getBuildVersion(), + node.getMinIndexVersion(), + node.getMinIndexVersion(), + node.getMaxIndexVersion() + ) + ) + ); + } + } + } + } } diff --git a/server/src/test/java/org/elasticsearch/env/NodeMetadataTests.java b/server/src/test/java/org/elasticsearch/env/NodeMetadataTests.java index 22308e15f4845..eccdd1c6ffea7 100644 --- a/server/src/test/java/org/elasticsearch/env/NodeMetadataTests.java +++ b/server/src/test/java/org/elasticsearch/env/NodeMetadataTests.java @@ -43,7 +43,7 @@ private BuildVersion randomBuildVersion() { } private IndexVersion randomIndexVersion() { - return rarely() ? IndexVersion.fromId(randomInt()) : IndexVersionUtils.randomVersion(random()); + return rarely() ? 
IndexVersion.fromId(randomInt()) : IndexVersionUtils.randomVersion(); } public void testEqualsHashcodeSerialization() { diff --git a/server/src/test/java/org/elasticsearch/index/IndexVersionTests.java b/server/src/test/java/org/elasticsearch/index/IndexVersionTests.java index 2a425c9256c31..8575b87c36799 100644 --- a/server/src/test/java/org/elasticsearch/index/IndexVersionTests.java +++ b/server/src/test/java/org/elasticsearch/index/IndexVersionTests.java @@ -11,7 +11,6 @@ import org.apache.lucene.util.Version; import org.elasticsearch.common.lucene.Lucene; -import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.index.IndexVersionUtils; import org.hamcrest.Matchers; @@ -151,9 +150,7 @@ public void testMax() { } } - @UpdateForV9(owner = UpdateForV9.Owner.CORE_INFRA) - @AwaitsFix(bugUrl = "believe this fails because index version has not yet been bumped to 9.0") - public void testMinimumCompatibleVersion() { + public void testGetMinimumCompatibleIndexVersion() { assertThat(IndexVersion.getMinimumCompatibleIndexVersion(7170099), equalTo(IndexVersion.fromId(6000099))); assertThat(IndexVersion.getMinimumCompatibleIndexVersion(8000099), equalTo(IndexVersion.fromId(7000099))); assertThat(IndexVersion.getMinimumCompatibleIndexVersion(10000000), equalTo(IndexVersion.fromId(9000000))); @@ -193,8 +190,6 @@ public void testParseLenient() { } } - @UpdateForV9(owner = UpdateForV9.Owner.CORE_INFRA) - @AwaitsFix(bugUrl = "can be unmuted once lucene is bumped to version 10") public void testLuceneVersionOnUnknownVersions() { // between two known versions, should use the lucene version of the previous version IndexVersion previousVersion = IndexVersionUtils.getPreviousVersion(); @@ -207,7 +202,7 @@ public void testLuceneVersionOnUnknownVersions() { // too old version, major should be the oldest supported lucene version minus 1 IndexVersion oldVersion = IndexVersion.fromId(5020199); - assertThat(oldVersion.luceneVersion().major, equalTo(IndexVersionUtils.getFirstVersion().luceneVersion().major - 1)); + assertThat(oldVersion.luceneVersion().major, equalTo(IndexVersionUtils.getLowestReadCompatibleVersion().luceneVersion().major - 1)); // future version, should be the same version as today IndexVersion futureVersion = IndexVersion.fromId(currentVersion.id() + 100); diff --git a/server/src/test/java/org/elasticsearch/index/analysis/AnalysisRegistryTests.java b/server/src/test/java/org/elasticsearch/index/analysis/AnalysisRegistryTests.java index 04170030c1173..db780f0640986 100644 --- a/server/src/test/java/org/elasticsearch/index/analysis/AnalysisRegistryTests.java +++ b/server/src/test/java/org/elasticsearch/index/analysis/AnalysisRegistryTests.java @@ -106,7 +106,7 @@ public void setUp() throws Exception { } public void testDefaultAnalyzers() throws IOException { - IndexVersion version = IndexVersionUtils.randomVersion(random()); + IndexVersion version = IndexVersionUtils.randomVersion(); Settings settings = Settings.builder() .put(IndexMetadata.SETTING_VERSION_CREATED, version) .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) @@ -120,7 +120,7 @@ public void testDefaultAnalyzers() throws IOException { } public void testOverrideDefaultAnalyzer() throws IOException { - IndexVersion version = IndexVersionUtils.randomVersion(random()); + IndexVersion version = IndexVersionUtils.randomVersion(); Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, version).build(); IndexAnalyzers indexAnalyzers = 
AnalysisRegistry.build( IndexCreationContext.CREATE_INDEX, @@ -137,7 +137,7 @@ public void testOverrideDefaultAnalyzer() throws IOException { } public void testOverrideDefaultAnalyzerWithoutAnalysisModeAll() throws IOException { - IndexVersion version = IndexVersionUtils.randomVersion(random()); + IndexVersion version = IndexVersionUtils.randomVersion(); Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, version).build(); IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("index", settings); TokenFilterFactory tokenFilter = new AbstractTokenFilterFactory("my_filter") { @@ -216,7 +216,7 @@ public void testOverrideDefaultIndexAnalyzerIsUnsupported() { } public void testOverrideDefaultSearchAnalyzer() { - IndexVersion version = IndexVersionUtils.randomVersion(random()); + IndexVersion version = IndexVersionUtils.randomVersion(); Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, version).build(); IndexAnalyzers indexAnalyzers = AnalysisRegistry.build( IndexCreationContext.CREATE_INDEX, @@ -319,8 +319,8 @@ public void testBuiltInAnalyzersAreCached() throws IOException { } } - public void testNoTypeOrTokenizerErrorMessage() throws IOException { - IndexVersion version = IndexVersionUtils.randomVersion(random()); + public void testNoTypeOrTokenizerErrorMessage() { + IndexVersion version = IndexVersionUtils.randomVersion(); Settings settings = Settings.builder() .put(IndexMetadata.SETTING_VERSION_CREATED, version) .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) diff --git a/server/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java b/server/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java index 0a7bd495f2f22..f5b86f422915e 100644 --- a/server/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java +++ b/server/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java @@ -63,7 +63,7 @@ public void testThatInstancesAreCachedAndReused() { PreBuiltAnalyzers.STANDARD.getAnalyzer(IndexVersion.current()) ); // same index version should be cached - IndexVersion v = IndexVersionUtils.randomVersion(random()); + IndexVersion v = IndexVersionUtils.randomVersion(); assertSame(PreBuiltAnalyzers.STANDARD.getAnalyzer(v), PreBuiltAnalyzers.STANDARD.getAnalyzer(v)); assertNotSame( PreBuiltAnalyzers.STANDARD.getAnalyzer(IndexVersion.current()), @@ -71,7 +71,7 @@ public void testThatInstancesAreCachedAndReused() { ); // Same Lucene version should be cached: - IndexVersion v1 = IndexVersionUtils.randomVersion(random()); + IndexVersion v1 = IndexVersionUtils.randomVersion(); IndexVersion v2 = new IndexVersion(v1.id() - 1, v1.luceneVersion()); assertSame(PreBuiltAnalyzers.STOP.getAnalyzer(v1), PreBuiltAnalyzers.STOP.getAnalyzer(v2)); } @@ -81,7 +81,7 @@ public void testThatAnalyzersAreUsedInMapping() throws IOException { PreBuiltAnalyzers randomPreBuiltAnalyzer = PreBuiltAnalyzers.values()[randomInt]; String analyzerName = randomPreBuiltAnalyzer.name().toLowerCase(Locale.ROOT); - IndexVersion randomVersion = IndexVersionUtils.randomVersion(random()); + IndexVersion randomVersion = IndexVersionUtils.randomWriteVersion(); Settings indexSettings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, randomVersion).build(); NamedAnalyzer namedAnalyzer = new PreBuiltAnalyzerProvider( diff --git a/server/src/test/java/org/elasticsearch/index/analysis/PreConfiguredTokenFilterTests.java 
b/server/src/test/java/org/elasticsearch/index/analysis/PreConfiguredTokenFilterTests.java index 40b37452990c7..a1a91ef2373f3 100644 --- a/server/src/test/java/org/elasticsearch/index/analysis/PreConfiguredTokenFilterTests.java +++ b/server/src/test/java/org/elasticsearch/index/analysis/PreConfiguredTokenFilterTests.java @@ -41,7 +41,7 @@ public boolean incrementToken() { IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("test", Settings.EMPTY); - IndexVersion version1 = IndexVersionUtils.randomVersion(random()); + IndexVersion version1 = IndexVersionUtils.randomVersion(); Settings settings1 = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, version1).build(); TokenFilterFactory tff_v1_1 = pctf.get(indexSettings, TestEnvironment.newEnvironment(emptyNodeSettings), "singleton", settings1); TokenFilterFactory tff_v1_2 = pctf.get(indexSettings, TestEnvironment.newEnvironment(emptyNodeSettings), "singleton", settings1); @@ -66,7 +66,7 @@ public boolean incrementToken() { } ); - IndexVersion version1 = IndexVersionUtils.randomVersion(random()); + IndexVersion version1 = IndexVersionUtils.randomVersion(); IndexSettings indexSettings1 = IndexSettingsModule.newIndexSettings( "test", Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, version1).build() @@ -133,7 +133,7 @@ public boolean incrementToken() { ); assertSame(tff_v1_1, tff_v1_2); - IndexVersion version2 = IndexVersionUtils.getPreviousMajorVersion(IndexVersionUtils.getFirstVersion()); + IndexVersion version2 = IndexVersionUtils.getPreviousMajorVersion(IndexVersionUtils.getLowestReadCompatibleVersion()); IndexSettings indexSettings2 = IndexSettingsModule.newIndexSettings( "test", Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, version2).build() diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index 3e3be6a315af2..d07c775da7e2a 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -6645,7 +6645,7 @@ public void testStoreHonorsLuceneVersion() throws IOException { for (IndexVersion createdVersion : List.of( IndexVersion.current(), lowestCompatiblePreviousVersion, - IndexVersionUtils.getFirstVersion() + IndexVersionUtils.getLowestWriteCompatibleVersion() )) { Settings settings = Settings.builder().put(indexSettings()).put(IndexMetadata.SETTING_VERSION_CREATED, createdVersion).build(); IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("test", settings); diff --git a/server/src/test/java/org/elasticsearch/index/engine/RecoverySourcePruneMergePolicyTests.java b/server/src/test/java/org/elasticsearch/index/engine/RecoverySourcePruneMergePolicyTests.java index 74d6e83aff266..b8600842effe4 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/RecoverySourcePruneMergePolicyTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/RecoverySourcePruneMergePolicyTests.java @@ -44,7 +44,7 @@ import java.util.stream.Collectors; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; public class RecoverySourcePruneMergePolicyTests extends ESTestCase { @@ -191,7 +191,7 @@ public void testPruneSome() throws IOException { } assertEquals(i, extra_source.docID()); if (syntheticRecoverySource) { - assertThat(extra_source.longValue(), 
greaterThan(10L)); + assertThat(extra_source.longValue(), greaterThanOrEqualTo(10L)); } else { assertThat(extra_source.longValue(), equalTo(1L)); } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/SourceFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/SourceFieldMapperTests.java index 378920d0e6db5..b7693513a434d 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/SourceFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/SourceFieldMapperTests.java @@ -465,60 +465,6 @@ public void testRecoverySourceWitInvalidSettings() { ) ); } - { - Settings settings = Settings.builder() - .put(SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), SourceFieldMapper.Mode.SYNTHETIC.toString()) - .put(IndexSettings.RECOVERY_USE_SYNTHETIC_SOURCE_SETTING.getKey(), true) - .build(); - IllegalArgumentException exc = expectThrows( - IllegalArgumentException.class, - () -> createMapperService( - IndexVersionUtils.randomPreviousCompatibleVersion(random(), IndexVersions.USE_SYNTHETIC_SOURCE_FOR_RECOVERY_BACKPORT), - settings, - () -> false, - topMapping(b -> {}) - ) - ); - assertThat( - exc.getMessage(), - containsString( - String.format( - Locale.ROOT, - "The setting [%s] is unavailable on this cluster", - IndexSettings.RECOVERY_USE_SYNTHETIC_SOURCE_SETTING.getKey() - ) - ) - ); - } - { - Settings settings = Settings.builder() - .put(SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), SourceFieldMapper.Mode.SYNTHETIC.toString()) - .put(IndexSettings.RECOVERY_USE_SYNTHETIC_SOURCE_SETTING.getKey(), true) - .build(); - IllegalArgumentException exc = expectThrows( - IllegalArgumentException.class, - () -> createMapperService( - IndexVersionUtils.randomVersionBetween( - random(), - IndexVersions.UPGRADE_TO_LUCENE_10_0_0, - IndexVersions.DEPRECATE_SOURCE_MODE_MAPPER - ), - settings, - () -> false, - topMapping(b -> {}) - ) - ); - assertThat( - exc.getMessage(), - containsString( - String.format( - Locale.ROOT, - "The setting [%s] is unavailable on this cluster", - IndexSettings.RECOVERY_USE_SYNTHETIC_SOURCE_SETTING.getKey() - ) - ) - ); - } } public void testRecoverySourceWithSyntheticSource() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapperTests.java index 742797e000517..f0e8d2943517c 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapperTests.java @@ -1514,19 +1514,19 @@ public void testVectorSimilarity() { ); assertEquals( VectorSimilarityFunction.EUCLIDEAN, - VectorSimilarity.L2_NORM.vectorSimilarityFunction(IndexVersionUtils.randomVersion(random()), ElementType.BYTE) + VectorSimilarity.L2_NORM.vectorSimilarityFunction(IndexVersionUtils.randomVersion(), ElementType.BYTE) ); assertEquals( VectorSimilarityFunction.EUCLIDEAN, - VectorSimilarity.L2_NORM.vectorSimilarityFunction(IndexVersionUtils.randomVersion(random()), ElementType.FLOAT) + VectorSimilarity.L2_NORM.vectorSimilarityFunction(IndexVersionUtils.randomVersion(), ElementType.FLOAT) ); assertEquals( VectorSimilarityFunction.DOT_PRODUCT, - VectorSimilarity.DOT_PRODUCT.vectorSimilarityFunction(IndexVersionUtils.randomVersion(random()), ElementType.BYTE) + VectorSimilarity.DOT_PRODUCT.vectorSimilarityFunction(IndexVersionUtils.randomVersion(), ElementType.BYTE) ); 
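+ // Illustrative note, inferred from this change rather than stated in it: randomVersion() appears to draw from all readable index versions, while tests that must build writable indices switch to IndexVersionUtils.randomWriteVersion(), e.g. `IndexVersion created = IndexVersionUtils.randomWriteVersion();`. The similarity-function mapping checked here does not depend on the index being writable, so randomVersion() suffices.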
assertEquals( VectorSimilarityFunction.DOT_PRODUCT, - VectorSimilarity.DOT_PRODUCT.vectorSimilarityFunction(IndexVersionUtils.randomVersion(random()), ElementType.FLOAT) + VectorSimilarity.DOT_PRODUCT.vectorSimilarityFunction(IndexVersionUtils.randomVersion(), ElementType.FLOAT) ); } diff --git a/server/src/test/java/org/elasticsearch/index/query/functionscore/ScoreFunctionBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/functionscore/ScoreFunctionBuilderTests.java index 8d060d94e4c21..b58ac513a6449 100644 --- a/server/src/test/java/org/elasticsearch/index/query/functionscore/ScoreFunctionBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/functionscore/ScoreFunctionBuilderTests.java @@ -66,7 +66,6 @@ public void testRandomScoreFunctionWithSeedNoField() throws Exception { Mockito.when(context.getFieldType(IdFieldMapper.NAME)).thenReturn(new KeywordFieldMapper.KeywordFieldType(IdFieldMapper.NAME)); Mockito.when(context.isFieldMapped(IdFieldMapper.NAME)).thenReturn(true); builder.toFunction(context); - assertWarnings("As of version 7.0 Elasticsearch will require that a [field] parameter is provided when a [seed] is set"); } public void testRandomScoreFunctionWithSeed() throws Exception { diff --git a/server/src/test/java/org/elasticsearch/indices/analysis/AnalysisModuleTests.java b/server/src/test/java/org/elasticsearch/indices/analysis/AnalysisModuleTests.java index cf6941b84b791..1bcd84aadd6cd 100644 --- a/server/src/test/java/org/elasticsearch/indices/analysis/AnalysisModuleTests.java +++ b/server/src/test/java/org/elasticsearch/indices/analysis/AnalysisModuleTests.java @@ -233,7 +233,7 @@ public Map> getTokenizers() { new StablePluginsRegistry() ).getAnalysisRegistry(); - IndexVersion version = IndexVersionUtils.randomVersion(random()); + IndexVersion version = IndexVersionUtils.randomVersion(); IndexAnalyzers analyzers = getIndexAnalyzers( registry, Settings.builder() @@ -302,7 +302,7 @@ public List getPreConfiguredTokenFilters() { new StablePluginsRegistry() ).getAnalysisRegistry(); - IndexVersion version = IndexVersionUtils.randomVersion(random()); + IndexVersion version = IndexVersionUtils.randomVersion(); IndexAnalyzers analyzers = getIndexAnalyzers( registry, Settings.builder() @@ -389,7 +389,7 @@ public List getPreConfiguredTokenizers() { new StablePluginsRegistry() ).getAnalysisRegistry(); - IndexVersion version = IndexVersionUtils.randomVersion(random()); + IndexVersion version = IndexVersionUtils.randomVersion(); IndexAnalyzers analyzers = getIndexAnalyzers( registry, Settings.builder() diff --git a/server/src/test/java/org/elasticsearch/indices/analysis/IncorrectSetupStablePluginsTests.java b/server/src/test/java/org/elasticsearch/indices/analysis/IncorrectSetupStablePluginsTests.java index ca9184bca75da..181d3ec44f2b3 100644 --- a/server/src/test/java/org/elasticsearch/indices/analysis/IncorrectSetupStablePluginsTests.java +++ b/server/src/test/java/org/elasticsearch/indices/analysis/IncorrectSetupStablePluginsTests.java @@ -63,7 +63,7 @@ public void testIncorrectlyAnnotatedSettingsClass() throws IOException { Settings.builder() .put("index.analysis.analyzer.char_filter_test.tokenizer", "standard") .put("index.analysis.analyzer.char_filter_test.char_filter", "incorrectlyAnnotatedSettings") - .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersionUtils.randomVersion(random())) + .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersionUtils.randomVersion()) .build(), Map.of( "incorrectlyAnnotatedSettings", @@ -90,7 +90,7 @@ public 
void testIncorrectlyAnnotatedConstructor() throws IOException { Settings.builder() .put("index.analysis.analyzer.char_filter_test.tokenizer", "standard") .put("index.analysis.analyzer.char_filter_test.char_filter", "noInjectCharFilter") - .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersionUtils.randomVersion(random())) + .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersionUtils.randomVersion()) .build(), Map.of("noInjectCharFilter", new PluginInfo("noInjectCharFilter", NoInjectCharFilter.class.getName(), classLoader)) ) @@ -112,7 +112,7 @@ public void testMultiplePublicConstructors() throws IOException { Settings.builder() .put("index.analysis.analyzer.char_filter_test.tokenizer", "standard") .put("index.analysis.analyzer.char_filter_test.char_filter", "multipleConstructors") - .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersionUtils.randomVersion(random())) + .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersionUtils.randomVersion()) .build(), Map.of("multipleConstructors", new PluginInfo("multipleConstructors", MultipleConstructors.class.getName(), classLoader)) ) diff --git a/server/src/test/java/org/elasticsearch/indices/analysis/StableAnalysisPluginsNoSettingsTests.java b/server/src/test/java/org/elasticsearch/indices/analysis/StableAnalysisPluginsNoSettingsTests.java index 7cbda0e7086cb..6eac3847efa43 100644 --- a/server/src/test/java/org/elasticsearch/indices/analysis/StableAnalysisPluginsNoSettingsTests.java +++ b/server/src/test/java/org/elasticsearch/indices/analysis/StableAnalysisPluginsNoSettingsTests.java @@ -61,7 +61,7 @@ public IndexAnalyzers getIndexAnalyzers(Settings settings) throws IOException { } public void testStablePlugins() throws IOException { - IndexVersion version = IndexVersionUtils.randomVersion(random()); + IndexVersion version = IndexVersionUtils.randomVersion(); IndexAnalyzers analyzers = getIndexAnalyzers( Settings.builder() .put("index.analysis.analyzer.char_filter_test.tokenizer", "standard") diff --git a/server/src/test/java/org/elasticsearch/indices/analysis/StableAnalysisPluginsWithSettingsTests.java b/server/src/test/java/org/elasticsearch/indices/analysis/StableAnalysisPluginsWithSettingsTests.java index acde315b140ab..82f49888e911d 100644 --- a/server/src/test/java/org/elasticsearch/indices/analysis/StableAnalysisPluginsWithSettingsTests.java +++ b/server/src/test/java/org/elasticsearch/indices/analysis/StableAnalysisPluginsWithSettingsTests.java @@ -72,7 +72,7 @@ public void testCharFilters() throws IOException { .put("index.analysis.analyzer.char_filter_with_defaults_test.tokenizer", "standard") .put("index.analysis.analyzer.char_filter_with_defaults_test.char_filter", "stableCharFilterFactory") - .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersionUtils.randomVersion(random())) + .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersionUtils.randomVersion()) .build() ); assertTokenStreamContents(analyzers.get("char_filter_test").tokenStream("", "t#st"), new String[] { "t3st" }); @@ -88,7 +88,7 @@ public void testTokenFilters() throws IOException { .put("index.analysis.analyzer.token_filter_test.tokenizer", "standard") .put("index.analysis.analyzer.token_filter_test.filter", "my_token_filter") - .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersionUtils.randomVersion(random())) + .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersionUtils.randomVersion()) .build() ); assertTokenStreamContents( @@ -109,7 +109,7 @@ public void testTokenizer() throws IOException { 
.putList("index.analysis.tokenizer.my_tokenizer.tokenizer_list_of_chars", "_", " ") .put("index.analysis.analyzer.tokenizer_test.tokenizer", "my_tokenizer") - .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersionUtils.randomVersion(random())) + .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersionUtils.randomVersion()) .build() ); assertTokenStreamContents(analyzers.get("tokenizer_test").tokenStream("", "x_y z"), new String[] { "x", "y", "z" }); @@ -124,7 +124,7 @@ public void testAnalyzer() throws IOException { .put("index.analysis.analyzer.analyzer_provider_test.old_char", "#") .put("index.analysis.analyzer.analyzer_provider_test.new_number", 3) .put("index.analysis.analyzer.analyzer_provider_test.analyzerUseTokenListOfChars", true) - .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersionUtils.randomVersion(random())) + .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersionUtils.randomVersion()) .build() ); assertTokenStreamContents(analyzers.get("analyzer_provider_test").tokenStream("", "1x_y_#z"), new String[] { "y", "3z" }); diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java index d039c265c98ae..d9b2936dc30c0 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java @@ -721,7 +721,7 @@ public void testThrowExceptionOnPrimaryRelocatedBeforePhase1Started() throws IOE final IndexMetadata.Builder indexMetadata = IndexMetadata.builder("test") .settings( - indexSettings(IndexVersionUtils.randomVersion(random()), between(1, 5), between(0, 5)).put( + indexSettings(IndexVersionUtils.randomVersion(), between(1, 5), between(0, 5)).put( IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID(random()) ) diff --git a/server/src/test/java/org/elasticsearch/nodesinfo/NodeInfoStreamingTests.java b/server/src/test/java/org/elasticsearch/nodesinfo/NodeInfoStreamingTests.java index 33801dfb98417..ace8499d8ffd0 100644 --- a/server/src/test/java/org/elasticsearch/nodesinfo/NodeInfoStreamingTests.java +++ b/server/src/test/java/org/elasticsearch/nodesinfo/NodeInfoStreamingTests.java @@ -243,7 +243,7 @@ private static NodeInfo createNodeInfo() { return new NodeInfo( randomAlphaOfLengthBetween(6, 32), new CompatibilityVersions(TransportVersionUtils.randomVersion(random()), Map.of()), - IndexVersionUtils.randomVersion(random()), + IndexVersionUtils.randomVersion(), componentVersions, build, node, diff --git a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksClusterServiceTests.java b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksClusterServiceTests.java index c568f6a38a5fb..cd2327d90c5c5 100644 --- a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksClusterServiceTests.java +++ b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksClusterServiceTests.java @@ -966,7 +966,7 @@ private ClusterState insignificantChange(ClusterState clusterState) { } // Just add a random index - that shouldn't change anything IndexMetadata indexMetadata = IndexMetadata.builder(randomAlphaOfLength(10)) - .settings(Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersionUtils.randomVersion(random()))) + .settings(Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersionUtils.randomVersion())) .numberOfShards(1) .numberOfReplicas(1) .build(); @@ -1044,7 
+1044,7 @@ private ClusterState initialState() { private void changeRoutingTable(Metadata.Builder metadata, RoutingTable.Builder routingTable) { IndexMetadata indexMetadata = IndexMetadata.builder(randomAlphaOfLength(10)) - .settings(Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersionUtils.randomVersion(random()))) + .settings(Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersionUtils.randomVersion())) .numberOfShards(1) .numberOfReplicas(1) .build(); diff --git a/server/src/test/java/org/elasticsearch/rest/RestResponseTests.java b/server/src/test/java/org/elasticsearch/rest/RestResponseTests.java index b85ad31288c8c..bd810cea216fc 100644 --- a/server/src/test/java/org/elasticsearch/rest/RestResponseTests.java +++ b/server/src/test/java/org/elasticsearch/rest/RestResponseTests.java @@ -51,6 +51,7 @@ import static org.elasticsearch.ElasticsearchException.REST_EXCEPTION_SKIP_STACK_TRACE; import static org.elasticsearch.ElasticsearchExceptionTests.assertDeepEquals; import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; +import static org.elasticsearch.rest.RestController.ERROR_TRACE_DEFAULT; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -180,7 +181,7 @@ public void testStackTrace() throws IOException { } else { assertThat(response.status(), is(RestStatus.BAD_REQUEST)); } - boolean traceExists = request.paramAsBoolean("error_trace", false) && channel.detailedErrorsEnabled(); + boolean traceExists = request.paramAsBoolean("error_trace", ERROR_TRACE_DEFAULT) && channel.detailedErrorsEnabled(); if (traceExists) { assertThat(response.content().utf8ToString(), containsString(ElasticsearchException.STACK_TRACE)); } else { diff --git a/server/src/test/java/org/elasticsearch/search/SearchServiceSingleNodeTests.java b/server/src/test/java/org/elasticsearch/search/SearchServiceSingleNodeTests.java index 02593e41f5d84..0fc1694d39926 100644 --- a/server/src/test/java/org/elasticsearch/search/SearchServiceSingleNodeTests.java +++ b/server/src/test/java/org/elasticsearch/search/SearchServiceSingleNodeTests.java @@ -2684,7 +2684,8 @@ public void testDfsQueryPhaseRewrite() { service.executeQueryPhase( new QuerySearchRequest(null, context.id(), request, new AggregatedDfs(Map.of(), Map.of(), 10)), new SearchShardTask(42L, "", "", "", null, emptyMap()), - plainActionFuture + plainActionFuture, + TransportVersion.current() ); plainActionFuture.actionGet(); diff --git a/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java b/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java index 31bcab31ca8a7..d041121b8a96b 100644 --- a/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java +++ b/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java @@ -13,6 +13,8 @@ import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.SortField; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.TransportVersion; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.cluster.metadata.IndexMetadata; @@ -53,9 +55,14 @@ import java.io.IOException; import java.util.Collections; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.BiFunction; import java.util.function.Predicate; +import static 
org.elasticsearch.search.SearchService.maybeWrapListenerForStackTrace; +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.Matchers.not; + public class SearchServiceTests extends IndexShardTestCase { public void testCanMatchMatchAll() throws IOException { @@ -117,6 +124,33 @@ public Type getType() { doTestCanMatch(searchRequest, sortField, true, null, false); } + public void testMaybeWrapListenerForStackTrace() { + // Verifies that the listener keeps its stack trace while unwrapped, and has it stripped once wrapped. + AtomicBoolean isWrapped = new AtomicBoolean(false); + ActionListener<SearchPhaseResult> listener = new ActionListener<>() { + @Override + public void onResponse(SearchPhaseResult searchPhaseResult) { + // noop - we only care about failure scenarios + } + + @Override + public void onFailure(Exception e) { + if (isWrapped.get()) { + assertThat(e.getStackTrace().length, is(0)); + } else { + assertThat(e.getStackTrace().length, is(not(0))); + } + } + }; + Exception e = new Exception(); + e.fillInStackTrace(); + assertThat(e.getStackTrace().length, is(not(0))); + listener.onFailure(e); + listener = maybeWrapListenerForStackTrace(listener, TransportVersion.current(), threadPool); + isWrapped.set(true); + listener.onFailure(e); + } + private void doTestCanMatch( SearchRequest searchRequest, SortField sortField, diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorTests.java index 38294fb030ed4..bf26326abafbf 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorTests.java @@ -83,9 +83,9 @@ public class DateHistogramAggregatorTests extends DateHistogramAggregatorTestCas "2017-12-12T22:55:46" ); - public void testBooleanFieldDeprecated() throws IOException { + public void testBooleanFieldUnsupported() throws IOException { final String fieldName = "bogusBoolean"; - testCase(iw -> { + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> testCase(iw -> { Document d = new Document(); d.add(new SortedNumericDocValuesField(fieldName, 0)); iw.addDocument(d); @@ -95,8 +95,8 @@ public void testBooleanFieldDeprecated() throws IOException { new DateHistogramAggregationBuilder("name").calendarInterval(DateHistogramInterval.HOUR).field(fieldName), new BooleanFieldMapper.BooleanFieldType(fieldName) ) - ); - assertWarnings("Running DateHistogram aggregations on [boolean] fields is deprecated"); + )); + assertThat(e.getMessage(), equalTo("Field [bogusBoolean] of type [boolean] is not supported for aggregation [date_histogram]")); } public void testMatchNoDocs() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/snapshots/SnapshotsInProgressSerializationTests.java b/server/src/test/java/org/elasticsearch/snapshots/SnapshotsInProgressSerializationTests.java index a92d55f6d419c..91ab253b2e1fe 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SnapshotsInProgressSerializationTests.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SnapshotsInProgressSerializationTests.java @@ -133,7 +133,7 @@ private Entry randomSnapshot() { shards, null, SnapshotInfoTestUtils.randomUserMetadata(), - IndexVersionUtils.randomVersion(random()) + IndexVersionUtils.randomVersion() ); } diff --git
a/test/fixtures/gcs-fixture/build.gradle b/test/fixtures/gcs-fixture/build.gradle index e8f1a2e15a4e0..6cf2e1ee52c2c 100644 --- a/test/fixtures/gcs-fixture/build.gradle +++ b/test/fixtures/gcs-fixture/build.gradle @@ -9,7 +9,6 @@ apply plugin: 'elasticsearch.java' description = 'Fixture for Google Cloud Storage service' -tasks.named("test").configure { enabled = false } dependencies { api project(':server') diff --git a/test/fixtures/gcs-fixture/src/main/java/fixture/gcs/GoogleCloudStorageHttpHandler.java b/test/fixtures/gcs-fixture/src/main/java/fixture/gcs/GoogleCloudStorageHttpHandler.java index f6b52a32a9a1d..163712fb05a50 100644 --- a/test/fixtures/gcs-fixture/src/main/java/fixture/gcs/GoogleCloudStorageHttpHandler.java +++ b/test/fixtures/gcs-fixture/src/main/java/fixture/gcs/GoogleCloudStorageHttpHandler.java @@ -95,7 +95,7 @@ public void handle(final HttpExchange exchange) throws IOException { } else if (Regex.simpleMatch("GET /storage/v1/b/" + bucket + "/o*", request)) { // List Objects https://cloud.google.com/storage/docs/json_api/v1/objects/list final Map<String, String> params = new HashMap<>(); - RestUtils.decodeQueryString(exchange.getRequestURI().getQuery(), 0, params); + RestUtils.decodeQueryString(exchange.getRequestURI(), params); final String prefix = params.getOrDefault("prefix", ""); final String delimiter = params.get("delimiter"); @@ -212,7 +212,7 @@ public void handle(final HttpExchange exchange) throws IOException { } else if (Regex.simpleMatch("POST /upload/storage/v1/b/" + bucket + "/*uploadType=resumable*", request)) { // Resumable upload initialization https://cloud.google.com/storage/docs/json_api/v1/how-tos/resumable-upload final Map<String, String> params = new HashMap<>(); - RestUtils.decodeQueryString(exchange.getRequestURI().getQuery(), 0, params); + RestUtils.decodeQueryString(exchange.getRequestURI(), params); final String blobName = params.get("name"); blobs.put(blobName, BytesArray.EMPTY); @@ -237,7 +237,7 @@ public void handle(final HttpExchange exchange) throws IOException { } else if (Regex.simpleMatch("PUT /upload/storage/v1/b/" + bucket + "/o?*uploadType=resumable*", request)) { // Resumable upload https://cloud.google.com/storage/docs/json_api/v1/how-tos/resumable-upload final Map<String, String> params = new HashMap<>(); - RestUtils.decodeQueryString(exchange.getRequestURI().getQuery(), 0, params); + RestUtils.decodeQueryString(exchange.getRequestURI(), params); final String blobName = params.get("test_blob_name"); if (blobs.containsKey(blobName) == false) { @@ -269,8 +269,6 @@ public void handle(final HttpExchange exchange) throws IOException { exchange.sendResponseHeaders(RestStatus.NOT_FOUND.getStatus(), -1); } } finally { - int read = exchange.getRequestBody().read(); - assert read == -1 : "Request body should have been fully read here but saw [" + read + "]"; exchange.close(); } } diff --git a/test/fixtures/gcs-fixture/src/test/java/fixture/gcs/GoogleCloudStorageHttpHandlerTests.java b/test/fixtures/gcs-fixture/src/test/java/fixture/gcs/GoogleCloudStorageHttpHandlerTests.java new file mode 100644 index 0000000000000..0caaa983f76df --- /dev/null +++ b/test/fixtures/gcs-fixture/src/test/java/fixture/gcs/GoogleCloudStorageHttpHandlerTests.java @@ -0,0 +1,509 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements.
Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package fixture.gcs; + +import com.sun.net.httpserver.Headers; +import com.sun.net.httpserver.HttpContext; +import com.sun.net.httpserver.HttpExchange; +import com.sun.net.httpserver.HttpPrincipal; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.bytes.CompositeBytesReference; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.test.ESTestCase; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.net.InetSocketAddress; +import java.net.URI; +import java.nio.charset.StandardCharsets; +import java.util.Arrays; +import java.util.List; +import java.util.Objects; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.zip.GZIPOutputStream; + +public class GoogleCloudStorageHttpHandlerTests extends ESTestCase { + + private static final String HOST = "http://127.0.0.1:12345"; + private static final int RESUME_INCOMPLETE = 308; + + public void testRejectsBadUri() { + assertEquals( + RestStatus.NOT_FOUND.getStatus(), + handleRequest(new GoogleCloudStorageHttpHandler("bucket"), randomFrom("GET", "PUT", "POST", "DELETE", "HEAD"), "/not-in-bucket") + .status() + ); + } + + public void testCheckEndpoint() { + final var handler = new GoogleCloudStorageHttpHandler("bucket"); + + assertEquals( + RestStatus.OK, + handleRequest(handler, "GET", "/", BytesArray.EMPTY, Headers.of("Metadata-Flavor", "Google")).restStatus() + ); + } + + public void testSimpleObjectOperations() { + final var bucket = randomAlphaOfLength(10); + final var handler = new GoogleCloudStorageHttpHandler(bucket); + + assertEquals(RestStatus.NOT_FOUND, handleRequest(handler, "GET", "/download/storage/v1/b/" + bucket + "/o/blob").restStatus()); + + assertEquals( + new TestHttpResponse(RestStatus.OK, "{\"kind\":\"storage#objects\",\"items\":[],\"prefixes\":[]}"), + handleRequest(handler, "GET", "/storage/v1/b/" + bucket + "/o") + ); + + // Multipart upload + final var body = randomAlphaOfLength(50); + assertEquals( + RestStatus.OK, + handleRequest( + handler, + "POST", + "/upload/storage/v1/b/" + bucket + "/?uploadType=multipart", + createGzipCompressedMultipartUploadBody(bucket, "path/blob", body) + ).restStatus() + ); + assertEquals( + new TestHttpResponse(RestStatus.OK, body), + handleRequest(handler, "GET", "/download/storage/v1/b/" + bucket + "/o/path/blob") + ); + + assertEquals(new TestHttpResponse(RestStatus.OK, Strings.format(""" + {"kind":"storage#objects","items":[{"kind":"storage#object","bucket":"%s","name":"path/blob","id":"path/blob","size":"50"} + ],"prefixes":[]}""", bucket)), handleRequest(handler, "GET", "/storage/v1/b/" + bucket + "/o")); + + assertEquals(new TestHttpResponse(RestStatus.OK, Strings.format(""" + {"kind":"storage#objects","items":[{"kind":"storage#object","bucket":"%s","name":"path/blob","id":"path/blob","size":"50"} + ],"prefixes":[]}""", bucket)), handleRequest(handler, "GET", "/storage/v1/b/" + bucket + 
"/o?prefix=path/")); + + assertEquals( + new TestHttpResponse(RestStatus.OK, """ + {"kind":"storage#objects","items":[],"prefixes":[]}"""), + handleRequest(handler, "GET", "/storage/v1/b/" + bucket + "/o?prefix=path/other") + ); + + assertEquals( + new TestHttpResponse(RestStatus.OK, """ + --__END_OF_PART__d8b50acb-87dc-4630-a3d3-17d187132ebc__ + Content-Length: 162 + Content-Type: application/http + content-id: 1 + content-transfer-encoding: binary + + HTTP/1.1 204 NO_CONTENT + + + + + --__END_OF_PART__d8b50acb-87dc-4630-a3d3-17d187132ebc__ + """.replaceAll("\n", "\r\n")), + handleRequest( + handler, + "POST", + "/batch/storage/v1", + createBatchDeleteRequest(bucket, "path/blob"), + Headers.of("Content-Type", "mixed/multipart") + ) + ); + assertEquals( + RestStatus.OK, + handleRequest( + handler, + "POST", + "/batch/storage/v1", + createBatchDeleteRequest(bucket, "path/blob"), + Headers.of("Content-Type", "mixed/multipart") + ).restStatus() + ); + + assertEquals( + new TestHttpResponse(RestStatus.OK, """ + {"kind":"storage#objects","items":[],"prefixes":[]}"""), + handleRequest(handler, "GET", "/storage/v1/b/" + bucket + "/o?prefix=path/") + ); + } + + public void testGetWithBytesRange() { + final var bucket = randomIdentifier(); + final var handler = new GoogleCloudStorageHttpHandler(bucket); + final var blobName = "blob_name_" + randomIdentifier(); + final var blobPath = "/download/storage/v1/b/" + bucket + "/o/" + blobName; + final var blobBytes = randomBytesReference(256); + + assertEquals( + RestStatus.OK, + handleRequest( + handler, + "POST", + "/upload/storage/v1/b/" + bucket + "/?uploadType=multipart", + createGzipCompressedMultipartUploadBody(bucket, blobName, blobBytes) + ).restStatus() + ); + + assertEquals( + "No Range", + new TestHttpResponse(RestStatus.OK, blobBytes, TestHttpExchange.EMPTY_HEADERS), + handleRequest(handler, "GET", blobPath) + ); + + var end = blobBytes.length() - 1; + assertEquals( + "Exact Range: bytes=0-" + end, + new TestHttpResponse(RestStatus.OK, blobBytes, TestHttpExchange.EMPTY_HEADERS), + handleRequest(handler, "GET", blobPath, BytesArray.EMPTY, rangeHeader(0, end)) + ); + + end = randomIntBetween(blobBytes.length() - 1, Integer.MAX_VALUE); + assertEquals( + "Larger Range: bytes=0-" + end, + new TestHttpResponse(RestStatus.OK, blobBytes, TestHttpExchange.EMPTY_HEADERS), + handleRequest(handler, "GET", blobPath, BytesArray.EMPTY, rangeHeader(0, end)) + ); + + var start = randomIntBetween(blobBytes.length(), Integer.MAX_VALUE - 1); + end = randomIntBetween(start, Integer.MAX_VALUE); + assertEquals( + "Invalid Range: bytes=" + start + '-' + end, + new TestHttpResponse(RestStatus.REQUESTED_RANGE_NOT_SATISFIED, BytesArray.EMPTY, TestHttpExchange.EMPTY_HEADERS), + handleRequest(handler, "GET", blobPath, BytesArray.EMPTY, rangeHeader(start, end)) + ); + + start = randomIntBetween(0, blobBytes.length() - 1); + var length = randomIntBetween(1, blobBytes.length() - start); + end = start + length - 1; + assertEquals( + "Range: bytes=" + start + '-' + end, + new TestHttpResponse(RestStatus.OK, blobBytes.slice(start, length), TestHttpExchange.EMPTY_HEADERS), + handleRequest(handler, "GET", blobPath, BytesArray.EMPTY, rangeHeader(start, end)) + ); + } + + public void testResumableUpload() { + final var bucket = randomIdentifier(); + final var handler = new GoogleCloudStorageHttpHandler(bucket); + final var blobName = "blob_name_" + randomIdentifier(); + + final var createUploadResponse = handleRequest( + handler, + "POST", + "/upload/storage/v1/b/" + bucket + 
"/?uploadType=resumable&name=" + blobName + ); + final var locationHeader = createUploadResponse.headers.getFirst("Location"); + final var sessionURI = locationHeader.substring(locationHeader.indexOf(HOST) + HOST.length()); + assertEquals(RestStatus.OK, createUploadResponse.restStatus()); + + final var part1 = randomAlphaOfLength(50); + final var uploadPart1Response = handleRequest(handler, "PUT", sessionURI, part1, contentRangeHeader(0, 50, null)); + assertEquals(new TestHttpResponse(RESUME_INCOMPLETE, rangeHeader(0, 50)), uploadPart1Response); + + assertEquals( + new TestHttpResponse(RESUME_INCOMPLETE, TestHttpExchange.EMPTY_HEADERS), + handleRequest(handler, "PUT", sessionURI, BytesArray.EMPTY, contentRangeHeader(null, null, null)) + ); + + final var part2 = randomAlphaOfLength(50); + final var uploadPart2Response = handleRequest(handler, "PUT", sessionURI, part2, contentRangeHeader(51, 100, null)); + assertEquals(new TestHttpResponse(RESUME_INCOMPLETE, rangeHeader(51, 100)), uploadPart2Response); + + final var part3 = randomAlphaOfLength(30); + final var uploadPart3Response = handleRequest(handler, "PUT", sessionURI, part3, contentRangeHeader(101, 130, 130)); + assertEquals(new TestHttpResponse(RestStatus.OK, TestHttpExchange.EMPTY_HEADERS), uploadPart3Response); + + // complete upload should be visible now + + // can download contents + assertEquals( + new TestHttpResponse(RestStatus.OK, part1 + part2 + part3), + handleRequest(handler, "GET", "/download/storage/v1/b/" + bucket + "/o/" + blobName) + ); + + // can see in listing + assertEquals(new TestHttpResponse(RestStatus.OK, Strings.format(""" + {"kind":"storage#objects","items":[{"kind":"storage#object","bucket":"%s","name":"%s","id":"%s","size":"130"} + ],"prefixes":[]}""", bucket, blobName, blobName)), handleRequest(handler, "GET", "/storage/v1/b/" + bucket + "/o")); + + // can get metadata + assertEquals(new TestHttpResponse(RestStatus.OK, Strings.format(""" + {"kind":"storage#object","bucket":"%s","name":"%s","id":"%s","size":"130"} + """, bucket, blobName, blobName)), handleRequest(handler, "GET", "/storage/v1/b/" + bucket + "/o/" + blobName)); + } + + private record TestHttpResponse(int status, BytesReference body, Headers headers) { + TestHttpResponse(RestStatus status, BytesReference body, Headers headers) { + this(status.getStatus(), body, headers); + } + + TestHttpResponse(RestStatus status, String body) { + this(status.getStatus(), new BytesArray(body.getBytes(StandardCharsets.UTF_8)), TestHttpExchange.EMPTY_HEADERS); + } + + TestHttpResponse(RestStatus status, Headers headers) { + this(status.getStatus(), BytesArray.EMPTY, headers); + } + + TestHttpResponse(int statusCode, Headers headers) { + this(statusCode, BytesArray.EMPTY, headers); + } + + RestStatus restStatus() { + return Objects.requireNonNull(RestStatus.fromCode(status)); + } + + @Override + public String toString() { + return "TestHttpResponse{" + "status=" + status + ", body={size=" + body.utf8ToString() + "}, headers=" + headers + '}'; + } + } + + private static TestHttpResponse handleRequest(GoogleCloudStorageHttpHandler handler, String method, String uri) { + return handleRequest(handler, method, uri, ""); + } + + private static TestHttpResponse handleRequest(GoogleCloudStorageHttpHandler handler, String method, String uri, String requestBody) { + return handleRequest(handler, method, uri, new BytesArray(requestBody.getBytes(StandardCharsets.UTF_8))); + } + + private static TestHttpResponse handleRequest( + GoogleCloudStorageHttpHandler handler, + String 
method, + String uri, + String requestBody, + Headers headers + ) { + return handleRequest(handler, method, uri, new BytesArray(requestBody.getBytes(StandardCharsets.UTF_8)), headers); + } + + private static TestHttpResponse handleRequest( + GoogleCloudStorageHttpHandler handler, + String method, + String uri, + BytesReference requestBody + ) { + return handleRequest(handler, method, uri, requestBody, TestHttpExchange.EMPTY_HEADERS); + } + + private static TestHttpResponse handleRequest( + GoogleCloudStorageHttpHandler handler, + String method, + String uri, + BytesReference requestBody, + Headers requestHeaders + ) { + final var httpExchange = new TestHttpExchange(method, uri, requestBody, requestHeaders); + try { + handler.handle(httpExchange); + } catch (IOException e) { + fail(e); + } + assertNotEquals(0, httpExchange.getResponseCode()); + var responseHeaders = new Headers(); + httpExchange.getResponseHeaders().forEach((header, values) -> { + // com.sun.net.httpserver.Headers.Headers() normalize keys + if ("Range".equals(header) || "Content-range".equals(header) || "Location".equals(header)) { + responseHeaders.put(header, List.copyOf(values)); + } + }); + return new TestHttpResponse(httpExchange.getResponseCode(), httpExchange.getResponseBodyContents(), responseHeaders); + } + + private static Headers contentRangeHeader(@Nullable Integer startInclusive, @Nullable Integer endInclusive, @Nullable Integer limit) { + final String rangeString = startInclusive != null && endInclusive != null ? startInclusive + "-" + endInclusive : "*"; + final String limitString = limit == null ? "*" : limit.toString(); + return Headers.of("Content-Range", "bytes " + rangeString + "/" + limitString); + } + + private static Headers rangeHeader(long start, long end) { + return Headers.of("Range", Strings.format("bytes=%d-%d", start, end)); + } + + private static BytesReference createGzipCompressedMultipartUploadBody(String bucketName, String path, String content) { + return createGzipCompressedMultipartUploadBody(bucketName, path, new BytesArray(content.getBytes(StandardCharsets.UTF_8))); + } + + private static BytesReference createGzipCompressedMultipartUploadBody(String bucketName, String path, BytesReference content) { + final String metadataString = Strings.format("{\"bucket\":\"%s\", \"name\":\"%s\"}", bucketName, path); + final BytesReference header = new BytesArray(Strings.format(""" + --__END_OF_PART__a607a67c-6df7-4b87-b8a1-81f639a75a97__ + Content-Length: %d + Content-Type: application/json; charset=UTF-8 + content-transfer-encoding: binary + + %s + --__END_OF_PART__a607a67c-6df7-4b87-b8a1-81f639a75a97__ + Content-Type: application/octet-stream + content-transfer-encoding: binary + + """.replaceAll("\n", "\r\n"), metadataString.length(), metadataString).getBytes(StandardCharsets.UTF_8)); + + final BytesReference footer = new BytesArray(""" + + --__END_OF_PART__a607a67c-6df7-4b87-b8a1-81f639a75a97__-- + """.replaceAll("\n", "\r\n")); + final ByteArrayOutputStream out = new ByteArrayOutputStream(); + try (GZIPOutputStream gzipOutputStream = new GZIPOutputStream(out)) { + gzipOutputStream.write(BytesReference.toBytes(CompositeBytesReference.of(header, content, footer))); + } catch (IOException e) { + fail(e); + } + return new BytesArray(out.toByteArray()); + } + + private static String createBatchDeleteRequest(String bucketName, String... 
paths) { + final String deleteRequestTemplate = """ + DELETE %s/storage/v1/b/%s/o/%s HTTP/1.1 + Authorization: Bearer foo + x-goog-api-client: gl-java/23.0.0 gdcl/2.1.1 mac-os-x/15.2 + + + """; + final String partTemplate = """ + --__END_OF_PART__d8b50acb-87dc-4630-a3d3-17d187132ebc__ + Content-Length: %d + Content-Type: application/http + content-id: %d + content-transfer-encoding: binary + + %s + """; + StringBuilder builder = new StringBuilder(); + AtomicInteger contentId = new AtomicInteger(); + Arrays.stream(paths).forEach(p -> { + final String deleteRequest = Strings.format(deleteRequestTemplate, HOST, bucketName, p); + final String part = Strings.format(partTemplate, deleteRequest.length(), contentId.incrementAndGet(), deleteRequest); + builder.append(part); + }); + builder.append("--__END_OF_PART__d8b50acb-87dc-4630-a3d3-17d187132ebc__"); + return builder.toString(); + } + + private static class TestHttpExchange extends HttpExchange { + + private static final Headers EMPTY_HEADERS = new Headers(); + + private final String method; + private final URI uri; + private final BytesReference requestBody; + private final Headers requestHeaders; + + private final Headers responseHeaders = new Headers(); + private final BytesStreamOutput responseBody = new BytesStreamOutput(); + private int responseCode; + + TestHttpExchange(String method, String uri, BytesReference requestBody, Headers requestHeaders) { + this.method = method; + this.uri = URI.create(uri); + this.requestBody = requestBody; + this.requestHeaders = new Headers(requestHeaders); + this.requestHeaders.add("Host", HOST); + } + + @Override + public Headers getRequestHeaders() { + return requestHeaders; + } + + @Override + public Headers getResponseHeaders() { + return responseHeaders; + } + + @Override + public URI getRequestURI() { + return uri; + } + + @Override + public String getRequestMethod() { + return method; + } + + @Override + public HttpContext getHttpContext() { + return null; + } + + @Override + public void close() {} + + @Override + public InputStream getRequestBody() { + try { + return requestBody.streamInput(); + } catch (IOException e) { + throw new AssertionError(e); + } + } + + @Override + public OutputStream getResponseBody() { + return responseBody; + } + + @Override + public void sendResponseHeaders(int rCode, long responseLength) { + this.responseCode = rCode; + } + + @Override + public InetSocketAddress getRemoteAddress() { + return null; + } + + @Override + public int getResponseCode() { + return responseCode; + } + + public BytesReference getResponseBodyContents() { + return responseBody.bytes(); + } + + @Override + public InetSocketAddress getLocalAddress() { + return null; + } + + @Override + public String getProtocol() { + return "HTTP/1.1"; + } + + @Override + public Object getAttribute(String name) { + return null; + } + + @Override + public void setAttribute(String name, Object value) { + fail("setAttribute not implemented"); + } + + @Override + public void setStreams(InputStream i, OutputStream o) { + fail("setStreams not implemented"); + } + + @Override + public HttpPrincipal getPrincipal() { + fail("getPrincipal not implemented"); + throw new UnsupportedOperationException("getPrincipal not implemented"); + } + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodeUtils.java b/test/framework/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodeUtils.java index 64f8fa88762b8..20368753eac1d 100644 --- 
a/test/framework/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodeUtils.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodeUtils.java @@ -76,6 +76,7 @@ public static class Builder { private BuildVersion buildVersion; private Version version; private IndexVersion minIndexVersion; + private IndexVersion minReadOnlyIndexVersion; private IndexVersion maxIndexVersion; private String externalId; @@ -125,16 +126,23 @@ public Builder version(Version version, IndexVersion minIndexVersion, IndexVersi this.buildVersion = BuildVersion.fromVersionId(version.id()); this.version = version; this.minIndexVersion = minIndexVersion; + this.minReadOnlyIndexVersion = minIndexVersion; this.maxIndexVersion = maxIndexVersion; return this; } - public Builder version(BuildVersion version, IndexVersion minIndexVersion, IndexVersion maxIndexVersion) { + public Builder version( + BuildVersion version, + IndexVersion minIndexVersion, + IndexVersion minReadOnlyIndexVersion, + IndexVersion maxIndexVersion + ) { // see comment in VersionInformation assert version.equals(BuildVersion.current()); this.buildVersion = version; this.version = Version.CURRENT; this.minIndexVersion = minIndexVersion; + this.minReadOnlyIndexVersion = minReadOnlyIndexVersion; this.maxIndexVersion = maxIndexVersion; return this; } @@ -143,6 +151,7 @@ public Builder version(VersionInformation versions) { this.buildVersion = versions.buildVersion(); this.version = versions.nodeVersion(); this.minIndexVersion = versions.minIndexVersion(); + this.minReadOnlyIndexVersion = versions.minReadOnlyIndexVersion(); this.maxIndexVersion = versions.maxIndexVersion(); return this; } @@ -170,10 +179,10 @@ public DiscoveryNode build() { } VersionInformation versionInfo; - if (minIndexVersion == null || maxIndexVersion == null) { + if (minIndexVersion == null || minReadOnlyIndexVersion == null || maxIndexVersion == null) { versionInfo = VersionInformation.inferVersions(version); } else { - versionInfo = new VersionInformation(buildVersion, version, minIndexVersion, maxIndexVersion); + versionInfo = new VersionInformation(buildVersion, version, minIndexVersion, minReadOnlyIndexVersion, maxIndexVersion); } return new DiscoveryNode(name, id, ephemeralId, hostName, hostAddress, address, attributes, roles, versionInfo, externalId); diff --git a/test/framework/src/main/java/org/elasticsearch/index/KnownIndexVersions.java b/test/framework/src/main/java/org/elasticsearch/index/KnownIndexVersions.java index 47d239540814e..5cdb3f1808a38 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/KnownIndexVersions.java +++ b/test/framework/src/main/java/org/elasticsearch/index/KnownIndexVersions.java @@ -16,7 +16,11 @@ */ public class KnownIndexVersions { /** - * A sorted list of all known transport versions + * A sorted list of all known index versions */ public static final List ALL_VERSIONS = List.copyOf(IndexVersions.getAllVersions()); + /** + * A sorted list of all known index versions that can be written to + */ + public static final List ALL_WRITE_VERSIONS = List.copyOf(IndexVersions.getAllWriteVersions()); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java index 20cb66affddee..d239c6453a7fe 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java @@ -241,6 +241,10 @@ 
protected static IndexSettings indexSettings() { return serviceHolder.idxSettings; } + protected static MapperService mapperService() { + return serviceHolder.mapperService; + } + protected static String expectedFieldName(String builderFieldName) { return ALIAS_TO_CONCRETE_FIELD_NAME.getOrDefault(builderFieldName, builderFieldName); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/index/IndexVersionUtils.java b/test/framework/src/main/java/org/elasticsearch/test/index/IndexVersionUtils.java index f83e7e17f9aaa..592cffac33552 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/index/IndexVersionUtils.java +++ b/test/framework/src/main/java/org/elasticsearch/test/index/IndexVersionUtils.java @@ -24,32 +24,38 @@ public class IndexVersionUtils { private static final List ALL_VERSIONS = KnownIndexVersions.ALL_VERSIONS; + private static final List ALL_WRITE_VERSIONS = KnownIndexVersions.ALL_WRITE_VERSIONS; /** Returns all released versions */ public static List allReleasedVersions() { return ALL_VERSIONS; } - /** Returns the oldest known {@link IndexVersion} */ - public static IndexVersion getFirstVersion() { + /** Returns the oldest known {@link IndexVersion}. This version can only be read from and not written to */ + public static IndexVersion getLowestReadCompatibleVersion() { return ALL_VERSIONS.get(0); } + /** Returns the oldest known {@link IndexVersion} that can be written to */ + public static IndexVersion getLowestWriteCompatibleVersion() { + return ALL_WRITE_VERSIONS.get(0); + } + /** Returns a random {@link IndexVersion} from all available versions. */ public static IndexVersion randomVersion() { return ESTestCase.randomFrom(ALL_VERSIONS); } + /** Returns a random {@link IndexVersion} from all versions that can be written to. */ + public static IndexVersion randomWriteVersion() { + return ESTestCase.randomFrom(ALL_WRITE_VERSIONS); + } + /** Returns a random {@link IndexVersion} from all available versions without the ignore set */ public static IndexVersion randomVersion(Set ignore) { return ESTestCase.randomFrom(ALL_VERSIONS.stream().filter(v -> ignore.contains(v) == false).collect(Collectors.toList())); } - /** Returns a random {@link IndexVersion} from all available versions. */ - public static IndexVersion randomVersion(Random random) { - return ALL_VERSIONS.get(random.nextInt(ALL_VERSIONS.size())); - } - /** Returns a random {@link IndexVersion} between minVersion and maxVersion (inclusive). */ public static IndexVersion randomVersionBetween(Random random, @Nullable IndexVersion minVersion, @Nullable IndexVersion maxVersion) { if (minVersion != null && maxVersion != null && maxVersion.before(minVersion)) { diff --git a/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/AsyncSearchErrorTraceIT.java b/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/AsyncSearchErrorTraceIT.java new file mode 100644 index 0000000000000..39a6fa1e4b34f --- /dev/null +++ b/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/AsyncSearchErrorTraceIT.java @@ -0,0 +1,222 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.search; + +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.transport.TransportMessageListener; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xcontent.XContentType; +import org.junit.Before; + +import java.io.IOException; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.concurrent.atomic.AtomicBoolean; + +public class AsyncSearchErrorTraceIT extends ESIntegTestCase { + + @Override + protected boolean addMockHttpTransport() { + return false; // enable http + } + + @Override + protected Collection<Class<? extends Plugin>> nodePlugins() { + return List.of(AsyncSearch.class); + } + + private AtomicBoolean transportMessageHasStackTrace; + + @Before + private void setupMessageListener() { + internalCluster().getDataNodeInstances(TransportService.class).forEach(ts -> { + ts.addMessageListener(new TransportMessageListener() { + @Override + public void onResponseSent(long requestId, String action, Exception error) { + TransportMessageListener.super.onResponseSent(requestId, action, error); + if (action.startsWith("indices:data/read/search")) { + Optional<Throwable> throwable = ExceptionsHelper.unwrapCausesAndSuppressed( + error, + t -> t.getStackTrace().length > 0 + ); + transportMessageHasStackTrace.set(throwable.isPresent()); + } + } + }); + }); + } + + private void setupIndexWithDocs() { + createIndex("test1", "test2"); + indexRandom( + true, + prepareIndex("test1").setId("1").setSource("field", "foo"), + prepareIndex("test2").setId("10").setSource("field", 5) + ); + refresh(); + } + + public void testAsyncSearchFailingQueryErrorTraceDefault() throws IOException, InterruptedException { + transportMessageHasStackTrace = new AtomicBoolean(); + setupIndexWithDocs(); + + Request searchRequest = new Request("POST", "/_async_search"); + searchRequest.setJsonEntity(""" + { + "query": { + "simple_query_string" : { + "query": "foo", + "fields": ["field"] + } + } + } + """); + searchRequest.addParameter("keep_on_completion", "true"); + searchRequest.addParameter("wait_for_completion_timeout", "0ms"); + Map<String, Object> responseEntity = performRequestAndGetResponseEntityAfterDelay(searchRequest, TimeValue.ZERO); + String asyncExecutionId = (String) responseEntity.get("id"); + Request request = new Request("GET", "/_async_search/" + asyncExecutionId); + while (responseEntity.get("is_running") instanceof Boolean isRunning && isRunning) { + responseEntity = performRequestAndGetResponseEntityAfterDelay(request, TimeValue.timeValueSeconds(1L)); + } + // check that the stack trace was not sent from the data node to the coordinating node + assertFalse(transportMessageHasStackTrace.get()); + } + + public void testAsyncSearchFailingQueryErrorTraceTrue() throws IOException, InterruptedException { + transportMessageHasStackTrace = new AtomicBoolean(); + setupIndexWithDocs(); + + Request searchRequest = new Request("POST", "/_async_search"); + searchRequest.setJsonEntity(""" + { + "query": { + "simple_query_string" : { + "query": "foo", + "fields": ["field"] + } + } + } + """); + searchRequest.addParameter("error_trace", "true"); + searchRequest.addParameter("keep_on_completion", "true"); +
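+ // keep_on_completion together with a zero wait_for_completion_timeout forces the async path: the submit call returns immediately with an id and the result is polled via the GET below.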
searchRequest.addParameter("wait_for_completion_timeout", "0ms"); + Map responseEntity = performRequestAndGetResponseEntityAfterDelay(searchRequest, TimeValue.ZERO); + String asyncExecutionId = (String) responseEntity.get("id"); + Request request = new Request("GET", "/_async_search/" + asyncExecutionId); + request.addParameter("error_trace", "true"); + while (responseEntity.get("is_running") instanceof Boolean isRunning && isRunning) { + responseEntity = performRequestAndGetResponseEntityAfterDelay(request, TimeValue.timeValueSeconds(1L)); + } + // check that the stack trace was sent from the data node to the coordinating node + assertTrue(transportMessageHasStackTrace.get()); + } + + public void testAsyncSearchFailingQueryErrorTraceFalse() throws IOException, InterruptedException { + transportMessageHasStackTrace = new AtomicBoolean(); + setupIndexWithDocs(); + + Request searchRequest = new Request("POST", "/_async_search"); + searchRequest.setJsonEntity(""" + { + "query": { + "simple_query_string" : { + "query": "foo", + "fields": ["field"] + } + } + } + """); + searchRequest.addParameter("error_trace", "false"); + searchRequest.addParameter("keep_on_completion", "true"); + searchRequest.addParameter("wait_for_completion_timeout", "0ms"); + Map responseEntity = performRequestAndGetResponseEntityAfterDelay(searchRequest, TimeValue.ZERO); + String asyncExecutionId = (String) responseEntity.get("id"); + Request request = new Request("GET", "/_async_search/" + asyncExecutionId); + request.addParameter("error_trace", "false"); + while (responseEntity.get("is_running") instanceof Boolean isRunning && isRunning) { + responseEntity = performRequestAndGetResponseEntityAfterDelay(request, TimeValue.timeValueSeconds(1L)); + } + // check that the stack trace was not sent from the data node to the coordinating node + assertFalse(transportMessageHasStackTrace.get()); + } + + public void testAsyncSearchFailingQueryErrorTraceFalseOnSubmitAndTrueOnGet() throws IOException, InterruptedException { + transportMessageHasStackTrace = new AtomicBoolean(); + setupIndexWithDocs(); + + Request searchRequest = new Request("POST", "/_async_search"); + searchRequest.setJsonEntity(""" + { + "query": { + "simple_query_string" : { + "query": "foo", + "fields": ["field"] + } + } + } + """); + searchRequest.addParameter("error_trace", "false"); + searchRequest.addParameter("keep_on_completion", "true"); + searchRequest.addParameter("wait_for_completion_timeout", "0ms"); + Map responseEntity = performRequestAndGetResponseEntityAfterDelay(searchRequest, TimeValue.ZERO); + String asyncExecutionId = (String) responseEntity.get("id"); + Request request = new Request("GET", "/_async_search/" + asyncExecutionId); + request.addParameter("error_trace", "true"); + while (responseEntity.get("is_running") instanceof Boolean isRunning && isRunning) { + responseEntity = performRequestAndGetResponseEntityAfterDelay(request, TimeValue.timeValueSeconds(1L)); + } + // check that the stack trace was not sent from the data node to the coordinating node + assertFalse(transportMessageHasStackTrace.get()); + } + + public void testAsyncSearchFailingQueryErrorTraceTrueOnSubmitAndFalseOnGet() throws IOException, InterruptedException { + transportMessageHasStackTrace = new AtomicBoolean(); + setupIndexWithDocs(); + + Request searchRequest = new Request("POST", "/_async_search"); + searchRequest.setJsonEntity(""" + { + "query": { + "simple_query_string" : { + "query": "foo", + "fields": ["field"] + } + } + } + """); + 
searchRequest.addParameter("error_trace", "true"); + searchRequest.addParameter("keep_on_completion", "true"); + searchRequest.addParameter("wait_for_completion_timeout", "0ms"); + Map responseEntity = performRequestAndGetResponseEntityAfterDelay(searchRequest, TimeValue.ZERO); + String asyncExecutionId = (String) responseEntity.get("id"); + Request request = new Request("GET", "/_async_search/" + asyncExecutionId); + request.addParameter("error_trace", "false"); + while (responseEntity.get("is_running") instanceof Boolean isRunning && isRunning) { + responseEntity = performRequestAndGetResponseEntityAfterDelay(request, TimeValue.timeValueSeconds(1L)); + } + // check that the stack trace was sent from the data node to the coordinating node + assertTrue(transportMessageHasStackTrace.get()); + } + + private Map performRequestAndGetResponseEntityAfterDelay(Request r, TimeValue sleep) throws IOException, + InterruptedException { + Thread.sleep(sleep.millis()); + Response response = getRestClient().performRequest(r); + XContentType entityContentType = XContentType.fromMediaType(response.getEntity().getContentType().getValue()); + return XContentHelper.convertToMap(entityContentType.xContent(), response.getEntity().getContent(), false); + } +} diff --git a/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/RestSubmitAsyncSearchAction.java b/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/RestSubmitAsyncSearchAction.java index bd09d8f7740a1..952febd46c34c 100644 --- a/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/RestSubmitAsyncSearchAction.java +++ b/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/RestSubmitAsyncSearchAction.java @@ -55,6 +55,9 @@ public String getName() { @Override protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { + if (client.threadPool() != null && client.threadPool().getThreadContext() != null) { + client.threadPool().getThreadContext().setErrorTraceTransportHeader(request); + } SubmitAsyncSearchRequest submit = new SubmitAsyncSearchRequest(); IntConsumer setSize = size -> submit.getSearchRequest().source().size(size); // for simplicity, we share parsing with ordinary search. 
That means a couple of unsupported parameters, like scroll diff --git a/x-pack/plugin/build.gradle b/x-pack/plugin/build.gradle index fb37fb3575551..aa6e8de4ec27c 100644 --- a/x-pack/plugin/build.gradle +++ b/x-pack/plugin/build.gradle @@ -94,5 +94,6 @@ tasks.named("yamlRestCompatTestTransform").configure({ task -> task.skipTest("privileges/11_builtin/Test get builtin privileges" ,"unnecessary to test compatibility") task.skipTest("esql/61_enrich_ip/Invalid IP strings", "We switched from exceptions to null+warnings for ENRICH runtime errors") task.skipTest("esql/180_match_operator/match with non text field", "Match operator can now be used on non-text fields") + task.skipTest("esql/180_match_operator/match with functions", "Error message changed") }) diff --git a/x-pack/plugin/ccr/src/javaRestTest/java/org/elasticsearch/xpack/ccr/rest/ShardChangesRestIT.java b/x-pack/plugin/ccr/src/javaRestTest/java/org/elasticsearch/xpack/ccr/rest/ShardChangesRestIT.java index e5dfea7b772f2..4c61904475093 100644 --- a/x-pack/plugin/ccr/src/javaRestTest/java/org/elasticsearch/xpack/ccr/rest/ShardChangesRestIT.java +++ b/x-pack/plugin/ccr/src/javaRestTest/java/org/elasticsearch/xpack/ccr/rest/ShardChangesRestIT.java @@ -26,6 +26,9 @@ import org.junit.ClassRule; import java.io.IOException; +import java.time.Instant; +import java.time.ZoneOffset; +import java.time.format.DateTimeFormatter; import java.util.List; import java.util.Locale; import java.util.Map; @@ -33,11 +36,14 @@ public class ShardChangesRestIT extends ESRestTestCase { private static final String CCR_SHARD_CHANGES_ENDPOINT = "/%s/ccr/shard_changes"; private static final String BULK_INDEX_ENDPOINT = "/%s/_bulk"; + private static final String DATA_STREAM_ENDPOINT = "/_data_stream/%s"; + private static final String INDEX_TEMPLATE_ENDPOINT = "/_index_template/%s"; private static final String[] SHARD_RESPONSE_FIELDS = new String[] { "took_in_millis", "operations", "shard_id", + "index_abstraction", "index", "settings_version", "max_seq_no_of_updates_or_deletes", @@ -46,6 +52,11 @@ public class ShardChangesRestIT extends ESRestTestCase { "aliases_version", "max_seq_no", "global_checkpoint" }; + + private static final String BULK_INDEX_TEMPLATE = """ + { "index": { "op_type": "create" } } + { "@timestamp": "%s", "name": "%s" } + """;; private static final String[] NAMES = { "skywalker", "leia", "obi-wan", "yoda", "chewbacca", "r2-d2", "c-3po", "darth-vader" }; @ClassRule public static ElasticsearchCluster cluster = ElasticsearchCluster.local() @@ -99,13 +110,86 @@ public void testShardChangesDefaultParams() throws IOException { createIndex(indexName, settings, mappings); assertTrue(indexExists(indexName)); - assertOK(client().performRequest(bulkRequest(indexName, randomIntBetween(10, 20)))); + assertOK(bulkIndex(indexName, randomIntBetween(10, 20))); final Request shardChangesRequest = new Request("GET", shardChangesEndpoint(indexName)); final Response response = client().performRequest(shardChangesRequest); assertOK(response); assertShardChangesResponse( - XContentHelper.convertToMap(JsonXContent.jsonXContent, EntityUtils.toString(response.getEntity()), false) + XContentHelper.convertToMap(JsonXContent.jsonXContent, EntityUtils.toString(response.getEntity()), false), + indexName + ); + } + + public void testDataStreamShardChangesDefaultParams() throws IOException { + final String templateName = randomAlphanumericOfLength(8).toLowerCase(Locale.ROOT); + assertOK(createIndexTemplate(templateName, """ + { + "index_patterns": [ "test-*-*" ], + 
"data_stream": {}, + "priority": 100, + "template": { + "mappings": { + "properties": { + "@timestamp": { + "type": "date" + }, + "name": { + "type": "keyword" + } + } + } + } + }""")); + + final String dataStreamName = "test-" + + randomAlphanumericOfLength(5).toLowerCase(Locale.ROOT) + + "-" + + randomAlphaOfLength(5).toLowerCase(Locale.ROOT); + assertOK(createDataStream(dataStreamName)); + + assertOK(bulkIndex(dataStreamName, randomIntBetween(10, 20))); + + final Request shardChangesRequest = new Request("GET", shardChangesEndpoint(dataStreamName)); + final Response response = client().performRequest(shardChangesRequest); + assertOK(response); + assertShardChangesResponse( + XContentHelper.convertToMap(JsonXContent.jsonXContent, EntityUtils.toString(response.getEntity()), false), + dataStreamName + ); + } + + public void testIndexAliasShardChangesDefaultParams() throws IOException { + final String indexName = randomAlphanumericOfLength(10).toLowerCase(Locale.ROOT); + final String aliasName = randomAlphanumericOfLength(8).toLowerCase(Locale.ROOT); + final Settings settings = Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexSettings.INDEX_TRANSLOG_SYNC_INTERVAL_SETTING.getKey(), "1s") + .build(); + final String mappings = """ + { + "properties": { + "name": { + "type": "keyword" + } + } + } + """; + createIndex(indexName, settings, mappings); + assertTrue(indexExists(indexName)); + + final Request putAliasRequest = new Request("PUT", "/" + indexName + "/_alias/" + aliasName); + assertOK(client().performRequest(putAliasRequest)); + + assertOK(bulkIndex(aliasName, randomIntBetween(10, 20))); + + final Request shardChangesRequest = new Request("GET", shardChangesEndpoint(aliasName)); + final Response response = client().performRequest(shardChangesRequest); + assertOK(response); + assertShardChangesResponse( + XContentHelper.convertToMap(JsonXContent.jsonXContent, EntityUtils.toString(response.getEntity()), false), + aliasName ); } @@ -121,7 +205,7 @@ public void testShardChangesWithAllParameters() throws IOException { ); assertTrue(indexExists(indexName)); - assertOK(client().performRequest(bulkRequest(indexName, randomIntBetween(100, 200)))); + assertOK(bulkIndex(indexName, randomIntBetween(100, 200))); final Request shardChangesRequest = new Request("GET", shardChangesEndpoint(indexName)); shardChangesRequest.addParameter("from_seq_no", "0"); @@ -132,7 +216,8 @@ public void testShardChangesWithAllParameters() throws IOException { final Response response = client().performRequest(shardChangesRequest); assertOK(response); assertShardChangesResponse( - XContentHelper.convertToMap(JsonXContent.jsonXContent, EntityUtils.toString(response.getEntity()), false) + XContentHelper.convertToMap(JsonXContent.jsonXContent, EntityUtils.toString(response.getEntity()), false), + indexName ); } @@ -148,7 +233,7 @@ public void testShardChangesMultipleRequests() throws IOException { ); assertTrue(indexExists(indexName)); - assertOK(client().performRequest(bulkRequest(indexName, randomIntBetween(100, 200)))); + assertOK(bulkIndex(indexName, randomIntBetween(100, 200))); final Request firstRequest = new Request("GET", shardChangesEndpoint(indexName)); firstRequest.addParameter("from_seq_no", "0"); @@ -159,7 +244,8 @@ public void testShardChangesMultipleRequests() throws IOException { final Response firstResponse = client().performRequest(firstRequest); assertOK(firstResponse); assertShardChangesResponse( - 
XContentHelper.convertToMap(JsonXContent.jsonXContent, EntityUtils.toString(firstResponse.getEntity()), false) + XContentHelper.convertToMap(JsonXContent.jsonXContent, EntityUtils.toString(firstResponse.getEntity()), false), + indexName ); final Request secondRequest = new Request("GET", shardChangesEndpoint(indexName)); @@ -171,7 +257,8 @@ public void testShardChangesMultipleRequests() throws IOException { final Response secondResponse = client().performRequest(secondRequest); assertOK(secondResponse); assertShardChangesResponse( - XContentHelper.convertToMap(JsonXContent.jsonXContent, EntityUtils.toString(secondResponse.getEntity()), false) + XContentHelper.convertToMap(JsonXContent.jsonXContent, EntityUtils.toString(secondResponse.getEntity()), false), + indexName ); } @@ -231,17 +318,36 @@ public void testShardChangesMissingIndex() throws IOException { assertResponseException(ex, RestStatus.BAD_REQUEST, "Failed to process shard changes for index [" + indexName + "]"); } - private static Request bulkRequest(final String indexName, int numberOfDocuments) { + private static Response bulkIndex(final String indexName, int numberOfDocuments) throws IOException { final StringBuilder sb = new StringBuilder(); + long timestamp = System.currentTimeMillis(); for (int i = 0; i < numberOfDocuments; i++) { - sb.append(String.format(Locale.ROOT, "{ \"index\": { \"_id\": \"%d\" } }\n{ \"name\": \"%s\" }\n", i + 1, randomFrom(NAMES))); + sb.append( + String.format( + Locale.ROOT, + BULK_INDEX_TEMPLATE, + Instant.ofEpochMilli(timestamp).atOffset(ZoneOffset.UTC).format(DateTimeFormatter.ISO_OFFSET_DATE_TIME), + randomFrom(NAMES) + ) + ); + timestamp += 1000; // 1 second } final Request request = new Request("POST", bulkEndpoint(indexName)); request.setJsonEntity(sb.toString()); request.addParameter("refresh", "true"); - return request; + return client().performRequest(request); + } + + private Response createDataStream(final String dataStreamName) throws IOException { + return client().performRequest(new Request("PUT", dataStreamEndpoint(dataStreamName))); + } + + private static Response createIndexTemplate(final String templateName, final String mappings) throws IOException { + final Request request = new Request("PUT", indexTemplateEndpoint(templateName)); + request.setJsonEntity(mappings); + return client().performRequest(request); } private static String shardChangesEndpoint(final String indexName) { @@ -252,16 +358,28 @@ private static String bulkEndpoint(final String indexName) { return String.format(Locale.ROOT, BULK_INDEX_ENDPOINT, indexName); } + private static String dataStreamEndpoint(final String dataStreamName) { + return String.format(Locale.ROOT, DATA_STREAM_ENDPOINT, dataStreamName); + } + + private static String indexTemplateEndpoint(final String templateName) { + return String.format(Locale.ROOT, INDEX_TEMPLATE_ENDPOINT, templateName); + } + private void assertResponseException(final ResponseException ex, final RestStatus restStatus, final String error) { assertEquals(restStatus.getStatus(), ex.getResponse().getStatusLine().getStatusCode()); assertThat(ex.getMessage(), Matchers.containsString(error)); } - private void assertShardChangesResponse(final Map shardChangesResponseBody) { + private void assertShardChangesResponse(final Map shardChangesResponseBody, final String indexAbstractionName) { for (final String fieldName : SHARD_RESPONSE_FIELDS) { final Object fieldValue = shardChangesResponseBody.get(fieldName); assertNotNull("Field " + fieldName + " is missing or has a null value.", 
fieldValue); + if ("index_abstraction".equals(fieldName)) { + assertEquals(indexAbstractionName, fieldValue); + } + if ("operations".equals(fieldName)) { if (fieldValue instanceof List operationsList) { assertFalse("Field 'operations' is empty.", operationsList.isEmpty()); diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestShardChangesAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestShardChangesAction.java index 84171ebce162f..4a1d26d05a980 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestShardChangesAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestShardChangesAction.java @@ -10,6 +10,8 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.admin.indices.stats.ShardStats; import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexAbstraction; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; @@ -32,6 +34,7 @@ import java.util.Arrays; import java.util.Comparator; import java.util.List; +import java.util.Locale; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; @@ -42,10 +45,14 @@ import static org.elasticsearch.rest.RestRequest.Method.GET; /** - * A REST handler that retrieves shard changes in a specific index whose name is provided as a parameter. - * It handles GET requests to the "/{index}/ccr/shard_changes" endpoint retrieving shard-level changes, - * such as translog operations, mapping version, settings version, aliases version, the global checkpoint, - * maximum sequence number and maximum sequence number of updates or deletes. + * A REST handler that retrieves shard changes in a specific index, data stream or alias whose name is + * provided as a parameter. It handles GET requests to the "/{index}/ccr/shard_changes" endpoint retrieving + * shard-level changes, such as Translog operations, mapping version, settings version, aliases version, + * the global checkpoint, maximum sequence number and maximum sequence number of updates or deletes. + * <p>

+ * In the case of a data stream, the first backing index is considered the target for retrieving shard changes. + * In the case of an alias, the first index that the alias points to is considered the target for retrieving + * shard changes. * <p>
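+ * Example (hypothetical target name): {@code GET /logs-foo/ccr/shard_changes?from_seq_no=0&max_operations_count=100}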

* Note: This handler is only available for snapshot builds. */ @@ -84,32 +91,36 @@ public List routes() { */ @Override protected RestChannelConsumer prepareRequest(final RestRequest restRequest, final NodeClient client) throws IOException { - final var indexName = restRequest.param(INDEX_PARAM_NAME); + final var indexAbstractionName = restRequest.param(INDEX_PARAM_NAME); final var fromSeqNo = restRequest.paramAsLong(FROM_SEQ_NO_PARAM_NAME, DEFAULT_FROM_SEQ_NO); final var maxBatchSize = restRequest.paramAsSize(MAX_BATCH_SIZE_PARAM_NAME, DEFAULT_MAX_BATCH_SIZE); final var pollTimeout = restRequest.paramAsTime(POLL_TIMEOUT_PARAM_NAME, DEFAULT_POLL_TIMEOUT); final var maxOperationsCount = restRequest.paramAsInt(MAX_OPERATIONS_COUNT_PARAM_NAME, DEFAULT_MAX_OPERATIONS_COUNT); - final CompletableFuture indexUUIDCompletableFuture = asyncGetIndexUUID( + // NOTE: we first retrieve the concrete index name in case we are dealing with an alias or data stream. + // Then we use the concrete index name to retrieve the index UUID and shard stats. + final CompletableFuture indexNameCompletableFuture = asyncGetIndexName( client, - indexName, + indexAbstractionName, client.threadPool().executor(Ccr.CCR_THREAD_POOL_NAME) ); - final CompletableFuture shardStatsCompletableFuture = asyncShardStats( - client, - indexName, - client.threadPool().executor(Ccr.CCR_THREAD_POOL_NAME) + final CompletableFuture indexUUIDCompletableFuture = indexNameCompletableFuture.thenCompose( + concreteIndexName -> asyncGetIndexUUID(client, concreteIndexName, client.threadPool().executor(Ccr.CCR_THREAD_POOL_NAME)) + ); + final CompletableFuture shardStatsCompletableFuture = indexNameCompletableFuture.thenCompose( + concreteIndexName -> asyncShardStats(client, concreteIndexName, client.threadPool().executor(Ccr.CCR_THREAD_POOL_NAME)) ); return channel -> CompletableFuture.allOf(indexUUIDCompletableFuture, shardStatsCompletableFuture).thenRun(() -> { try { + final String concreteIndexName = indexNameCompletableFuture.get(DEFAULT_TIMEOUT_SECONDS, TimeUnit.SECONDS); final String indexUUID = indexUUIDCompletableFuture.get(DEFAULT_TIMEOUT_SECONDS, TimeUnit.SECONDS); final ShardStats shardStats = shardStatsCompletableFuture.get(DEFAULT_TIMEOUT_SECONDS, TimeUnit.SECONDS); final ShardId shardId = shardStats.getShardRouting().shardId(); final String expectedHistoryUUID = shardStats.getCommitStats().getUserData().get(Engine.HISTORY_UUID_KEY); final ShardChangesAction.Request shardChangesRequest = shardChangesRequest( - indexName, + concreteIndexName, indexUUID, shardId, expectedHistoryUUID, @@ -121,7 +132,12 @@ protected RestChannelConsumer prepareRequest(final RestRequest restRequest, fina client.execute(ShardChangesAction.INSTANCE, shardChangesRequest, new RestActionListener<>(channel) { @Override protected void processResponse(final ShardChangesAction.Response response) { - channel.sendResponse(new RestResponse(RestStatus.OK, shardChangesResponseToXContent(response, indexName, shardId))); + channel.sendResponse( + new RestResponse( + RestStatus.OK, + shardChangesResponseToXContent(response, indexAbstractionName, concreteIndexName, shardId) + ) + ); } }); @@ -132,7 +148,12 @@ protected void processResponse(final ShardChangesAction.Response response) { throw new IllegalStateException("Timeout while waiting for shard stats or index UUID", te); } }).exceptionally(ex -> { - channel.sendResponse(new RestResponse(RestStatus.BAD_REQUEST, "Failed to process shard changes for index [" + indexName + "]")); + channel.sendResponse( + new RestResponse( + 
RestStatus.BAD_REQUEST, + "Failed to process shard changes for index [" + indexAbstractionName + "] " + ex.getMessage() + ) + ); return null; }); } @@ -175,17 +196,20 @@ private static ShardChangesAction.Request shardChangesRequest( * Converts the response to XContent JSOn format. * * @param response The ShardChangesAction response. - * @param indexName The name of the index. + * @param indexAbstractionName The name of the index abstraction. + * @param concreteIndexName The name of the index. * @param shardId The ShardId. */ private static XContentBuilder shardChangesResponseToXContent( final ShardChangesAction.Response response, - final String indexName, + final String indexAbstractionName, + final String concreteIndexName, final ShardId shardId ) { try (XContentBuilder builder = XContentFactory.jsonBuilder()) { builder.startObject(); - builder.field("index", indexName); + builder.field("index_abstraction", indexAbstractionName); + builder.field("index", concreteIndexName); builder.field("shard_id", shardId); builder.field("mapping_version", response.getMappingVersion()); builder.field("settings_version", response.getSettingsVersion()); @@ -249,26 +273,60 @@ private static CompletableFuture supplyAsyncTask( }, executorService); } + /** + * Asynchronously retrieves the index name for a given index, alias or data stream. + * If the name represents a data stream, the name of the first backing index is returned. + * If the name represents an alias, the name of the first index that the alias points to is returned. + * + * @param client The NodeClient for executing the asynchronous request. + * @param indexAbstractionName The name of the index, alias or data stream. + * @return A CompletableFuture that completes with the retrieved index name. + */ + private static CompletableFuture asyncGetIndexName( + final NodeClient client, + final String indexAbstractionName, + final ExecutorService executorService + ) { + return supplyAsyncTask(() -> { + final ClusterState clusterState = client.admin() + .cluster() + .prepareState(new TimeValue(DEFAULT_TIMEOUT_SECONDS, TimeUnit.SECONDS)) + .get(GET_INDEX_UUID_TIMEOUT) + .getState(); + final IndexAbstraction indexAbstraction = clusterState.metadata().getIndicesLookup().get(indexAbstractionName); + if (indexAbstraction == null) { + throw new IllegalArgumentException( + String.format(Locale.ROOT, "Invalid index or data stream name [%s]", indexAbstractionName) + ); + } + if (indexAbstraction.getType() == IndexAbstraction.Type.DATA_STREAM + || indexAbstraction.getType() == IndexAbstraction.Type.ALIAS) { + return indexAbstraction.getIndices().getFirst().getName(); + } + return indexAbstractionName; + }, executorService, "Error while retrieving index name for index or data stream [" + indexAbstractionName + "]"); + } + /** * Asynchronously retrieves the shard stats for a given index using an executor service. * * @param client The NodeClient for executing the asynchronous request. - * @param indexName The name of the index for which to retrieve shard statistics. + * @param concreteIndexName The name of the index for which to retrieve shard statistics. * @param executorService The executorService service for executing the asynchronous task. * @return A CompletableFuture that completes with the retrieved ShardStats. * @throws ElasticsearchException If an error occurs while retrieving shard statistics. 
*/ private static CompletableFuture asyncShardStats( final NodeClient client, - final String indexName, + final String concreteIndexName, final ExecutorService executorService ) { return supplyAsyncTask( - () -> Arrays.stream(client.admin().indices().prepareStats(indexName).clear().get(SHARD_STATS_TIMEOUT).getShards()) + () -> Arrays.stream(client.admin().indices().prepareStats(concreteIndexName).clear().get(SHARD_STATS_TIMEOUT).getShards()) .max(Comparator.comparingLong(shardStats -> shardStats.getCommitStats().getGeneration())) - .orElseThrow(() -> new ElasticsearchException("Unable to retrieve shard stats for index: " + indexName)), + .orElseThrow(() -> new ElasticsearchException("Unable to retrieve shard stats for index: " + concreteIndexName)), executorService, - "Error while retrieving shard stats for index [" + indexName + "]" + "Error while retrieving shard stats for index [" + concreteIndexName + "]" ); } @@ -276,25 +334,25 @@ private static CompletableFuture asyncShardStats( * Asynchronously retrieves the index UUID for a given index using an executor service. * * @param client The NodeClient for executing the asynchronous request. - * @param indexName The name of the index for which to retrieve the index UUID. + * @param concreteIndexName The name of the index for which to retrieve the index UUID. * @param executorService The executorService service for executing the asynchronous task. * @return A CompletableFuture that completes with the retrieved index UUID. * @throws ElasticsearchException If an error occurs while retrieving the index UUID. */ private static CompletableFuture asyncGetIndexUUID( final NodeClient client, - final String indexName, + final String concreteIndexName, final ExecutorService executorService ) { return supplyAsyncTask( () -> client.admin() .indices() .prepareGetIndex() - .setIndices(indexName) + .setIndices(concreteIndexName) .get(GET_INDEX_UUID_TIMEOUT) - .getSetting(indexName, IndexMetadata.SETTING_INDEX_UUID), + .getSetting(concreteIndexName, IndexMetadata.SETTING_INDEX_UUID), executorService, - "Error while retrieving index UUID for index [" + indexName + "]" + "Error while retrieving index UUID for index [" + concreteIndexName + "]" ); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/search/SparseVectorQueryBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/search/SparseVectorQueryBuilder.java index e9e4e90421adc..35cba890e5e0c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/search/SparseVectorQueryBuilder.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/search/SparseVectorQueryBuilder.java @@ -90,7 +90,8 @@ public SparseVectorQueryBuilder( : (this.shouldPruneTokens ? 
new TokenPruningConfig() : null)); this.weightedTokensSupplier = null; - if (queryVectors == null ^ inferenceId == null == false) { + // Preserve BWC error messaging + if (queryVectors != null && inferenceId != null) { throw new IllegalArgumentException( "[" + NAME @@ -98,18 +99,24 @@ public SparseVectorQueryBuilder( + QUERY_VECTOR_FIELD.getPreferredName() + "] or [" + INFERENCE_ID_FIELD.getPreferredName() - + "]" + + "] for " + + ALLOWED_FIELD_TYPE + + " fields" ); } - if (inferenceId != null && query == null) { + + // Preserve BWC error messaging + if ((queryVectors == null) == (query == null)) { throw new IllegalArgumentException( "[" + NAME - + "] requires [" - + QUERY_FIELD.getPreferredName() - + "] when [" + + "] requires one of [" + + QUERY_VECTOR_FIELD.getPreferredName() + + "] or [" + INFERENCE_ID_FIELD.getPreferredName() - + "] is specified" + + "] for " + + ALLOWED_FIELD_TYPE + + " fields" ); } } @@ -143,6 +150,14 @@ public List<WeightedToken> getQueryVectors() { return queryVectors; } + public String getInferenceId() { + return inferenceId; + } + + public String getQuery() { + return query; + } + public boolean shouldPruneTokens() { return shouldPruneTokens; } @@ -176,7 +191,9 @@ protected void doXContent(XContentBuilder builder, Params params) throws IOExcep } builder.endObject(); } else { - builder.field(INFERENCE_ID_FIELD.getPreferredName(), inferenceId); + if (inferenceId != null) { + builder.field(INFERENCE_ID_FIELD.getPreferredName(), inferenceId); + } builder.field(QUERY_FIELD.getPreferredName(), query); } builder.field(PRUNE_FIELD.getPreferredName(), shouldPruneTokens); @@ -228,6 +245,11 @@ protected QueryBuilder doRewrite(QueryRewriteContext queryRewriteContext) { shouldPruneTokens, tokenPruningConfig ); + } else if (inferenceId == null) { + // Edge case, where inference_id was not specified in the request, + // but we did not intercept this and rewrite to a query on a field with + // pre-configured inference. So we trap here and output a nicer error message.
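+ // At this point the query text is set but no inference_id was supplied and none could be resolved for the field, so there is no way to embed the query; fail with a clear message instead of a confusing downstream error.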
+ throw new IllegalArgumentException("inference_id required to perform vector search on query string"); } // TODO move this to xpack core and use inference APIs diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/DocumentPermissions.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/DocumentPermissions.java index 24f0a52436203..92bb037888495 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/DocumentPermissions.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/DocumentPermissions.java @@ -159,13 +159,15 @@ private static void buildRoleQuery( if (queryBuilder != null) { failIfQueryUsesClient(queryBuilder, context); Query roleQuery = context.toQuery(queryBuilder).query(); - filter.add(roleQuery, SHOULD); - if (context.nestedLookup() != NestedLookup.EMPTY) { + if (context.nestedLookup() == NestedLookup.EMPTY) { + filter.add(roleQuery, SHOULD); + } else { if (NestedHelper.mightMatchNestedDocs(roleQuery, context)) { roleQuery = new BooleanQuery.Builder().add(roleQuery, FILTER) .add(Queries.newNonNestedFilter(context.indexVersionCreated()), FILTER) .build(); } + filter.add(roleQuery, SHOULD); // If access is allowed on root doc then also access is allowed on all nested docs of that root document: BitSetProducer rootDocs = context.bitsetFilter(Queries.newNonNestedFilter(context.indexVersionCreated())); ToChildBlockJoinQuery includeNestedDocs = new ToChildBlockJoinQuery(roleQuery, rootDocs); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLService.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLService.java index d0d5e463f9652..9704335776f11 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLService.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLService.java @@ -596,8 +596,6 @@ static Map getSSLSettingsMap(Settings settings) { sslSettingsMap.put(WatcherField.EMAIL_NOTIFICATION_SSL_PREFIX, settings.getByPrefix(WatcherField.EMAIL_NOTIFICATION_SSL_PREFIX)); sslSettingsMap.put(XPackSettings.TRANSPORT_SSL_PREFIX, settings.getByPrefix(XPackSettings.TRANSPORT_SSL_PREFIX)); sslSettingsMap.putAll(getTransportProfileSSLSettings(settings)); - // Mount Elastic Inference Service (part of the Inference plugin) configuration - sslSettingsMap.put("xpack.inference.elastic.http.ssl", settings.getByPrefix("xpack.inference.elastic.http.ssl.")); // Only build remote cluster server SSL if the port is enabled if (REMOTE_CLUSTER_SERVER_ENABLED.get(settings)) { sslSettingsMap.put(XPackSettings.REMOTE_CLUSTER_SERVER_SSL_PREFIX, getRemoteClusterServerSslSettings(settings)); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/LocalStateCompositeXPackPlugin.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/LocalStateCompositeXPackPlugin.java index d50f7bb27a5df..1f2c89c473a62 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/LocalStateCompositeXPackPlugin.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/LocalStateCompositeXPackPlugin.java @@ -623,7 +623,7 @@ public Map getSnapshotCommitSup } @SuppressWarnings("unchecked") - protected List filterPlugins(Class type) { + private List filterPlugins(Class type) { return plugins.stream().filter(x -> type.isAssignableFrom(x.getClass())).map(p -> ((T) 
p)).collect(Collectors.toList()); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/search/SparseVectorQueryBuilderTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/search/SparseVectorQueryBuilderTests.java index a5c1ba45d90b7..af557ed6b7f82 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/search/SparseVectorQueryBuilderTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/search/SparseVectorQueryBuilderTests.java @@ -260,16 +260,16 @@ public void testIllegalValues() { { IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> new SparseVectorQueryBuilder("field name", null, "model id") + () -> new SparseVectorQueryBuilder("field name", null, null) ); - assertEquals("[sparse_vector] requires one of [query_vector] or [inference_id]", e.getMessage()); + assertEquals("[sparse_vector] requires one of [query_vector] or [inference_id] for sparse_vector fields", e.getMessage()); } { IllegalArgumentException e = expectThrows( IllegalArgumentException.class, () -> new SparseVectorQueryBuilder("field name", "model text", null) ); - assertEquals("[sparse_vector] requires [query] when [inference_id] is specified", e.getMessage()); + assertEquals("[sparse_vector] requires one of [query_vector] or [inference_id] for sparse_vector fields", e.getMessage()); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexReaderWrapperIntegrationTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexReaderWrapperIntegrationTests.java index 4751f66cf548e..89b42228d8918 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexReaderWrapperIntegrationTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexReaderWrapperIntegrationTests.java @@ -23,9 +23,12 @@ import org.apache.lucene.search.TotalHitCountCollectorManager; import org.apache.lucene.store.Directory; import org.elasticsearch.client.internal.Client; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; +import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.index.IndexSettings; @@ -33,9 +36,11 @@ import org.elasticsearch.index.mapper.KeywordFieldMapper.KeywordFieldType; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperMetrics; +import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.Mapping; import org.elasticsearch.index.mapper.MappingLookup; import org.elasticsearch.index.mapper.MockFieldMapper; +import org.elasticsearch.index.mapper.SourceToParse; import org.elasticsearch.index.query.ParsedQuery; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.index.query.TermsQueryBuilder; @@ -45,6 +50,9 @@ import org.elasticsearch.search.internal.ContextIndexSearcher; import org.elasticsearch.test.AbstractBuilderTestCase; import org.elasticsearch.test.IndexSettingsModule; +import 
org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentFactory; +import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.security.SecurityContext; import org.elasticsearch.xpack.core.security.authc.Authentication; import org.elasticsearch.xpack.core.security.authc.AuthenticationTestHelper; @@ -52,6 +60,8 @@ import org.elasticsearch.xpack.core.security.authz.permission.DocumentPermissions; import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissions; +import java.io.IOException; +import java.util.Arrays; import java.util.HashSet; import java.util.List; import java.util.Set; @@ -340,6 +350,176 @@ protected IndicesAccessControl getIndicesAccessControl() { directory.close(); } + @Override + protected void initializeAdditionalMappings(MapperService mapperService) throws IOException { + XContentBuilder builder = XContentFactory.jsonBuilder() + .startObject() + .startObject("properties") + .startObject("f1") + .field("type", "keyword") + .endObject() + .startObject("nested1") + .field("type", "nested") + .startObject("properties") + .startObject("field") + .field("type", "keyword") + .endObject() + .endObject() + .endObject() + .endObject() + .endObject(); + mapperService.merge( + MapperService.SINGLE_MAPPING_NAME, + new CompressedXContent(Strings.toString(builder)), + MapperService.MergeReason.MAPPING_UPDATE + ); + } + + public void testDLSWithNestedDocs() throws Exception { + Directory directory = newDirectory(); + try ( + IndexWriter iw = new IndexWriter( + directory, + new IndexWriterConfig(new StandardAnalyzer()).setMergePolicy(NoMergePolicy.INSTANCE) + ) + ) { + var parser = mapperService().documentParser(); + String doc = """ + { + "f1": "value", + "nested1": [ + { + "field": "0" + }, + { + "field": "1" + }, + {} + ] + } + """; + var parsedDoc = parser.parseDocument( + new SourceToParse("0", new BytesArray(doc), XContentType.JSON), + mapperService().mappingLookup() + ); + iw.addDocuments(parsedDoc.docs()); + + doc = """ + { + "nested1": [ + { + "field": "12" + }, + { + "field": "13" + }, + {} + ] + } + """; + parsedDoc = parser.parseDocument( + new SourceToParse("1", new BytesArray(doc), XContentType.JSON), + mapperService().mappingLookup() + ); + iw.addDocuments(parsedDoc.docs()); + + doc = """ + { + "f1": "value", + "nested1": [ + { + "field": "12" + }, + {} + ] + } + """; + parsedDoc = parser.parseDocument( + new SourceToParse("2", new BytesArray(doc), XContentType.JSON), + mapperService().mappingLookup() + ); + iw.addDocuments(parsedDoc.docs()); + + doc = """ + { + "nested1": [ + { + "field": "12" + }, + {} + ] + } + """; + parsedDoc = parser.parseDocument( + new SourceToParse("3", new BytesArray(doc), XContentType.JSON), + mapperService().mappingLookup() + ); + iw.addDocuments(parsedDoc.docs()); + + iw.commit(); + } + + DirectoryReader directoryReader = ElasticsearchDirectoryReader.wrap( + DirectoryReader.open(directory), + new ShardId(indexSettings().getIndex(), 0) + ); + SearchExecutionContext context = createSearchExecutionContext(new IndexSearcher(directoryReader)); + + final ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + final SecurityContext securityContext = new SecurityContext(Settings.EMPTY, threadContext); + final Authentication authentication = AuthenticationTestHelper.builder().build(); + new AuthenticationContextSerializer().writeToContext(authentication, threadContext); + + Set queries = new HashSet<>(); + queries.add(new BytesArray("{\"bool\": { \"must_not\": { \"exists\": 
{ \"field\": \"f1\" } } } }")); + IndicesAccessControl.IndexAccessControl indexAccessControl = new IndicesAccessControl.IndexAccessControl( + FieldPermissions.DEFAULT, + DocumentPermissions.filteredBy(queries) + ); + + DocumentSubsetBitsetCache bitsetCache = new DocumentSubsetBitsetCache(Settings.EMPTY, Executors.newSingleThreadExecutor()); + + final MockLicenseState licenseState = mock(MockLicenseState.class); + when(licenseState.isAllowed(DOCUMENT_LEVEL_SECURITY_FEATURE)).thenReturn(true); + ScriptService scriptService = mock(ScriptService.class); + SecurityIndexReaderWrapper wrapper = new SecurityIndexReaderWrapper( + s -> context, + bitsetCache, + securityContext, + licenseState, + scriptService + ) { + + @Override + protected IndicesAccessControl getIndicesAccessControl() { + IndicesAccessControl indicesAccessControl = new IndicesAccessControl( + true, + singletonMap(indexSettings().getIndex().getName(), indexAccessControl) + ); + return indicesAccessControl; + } + }; + + DirectoryReader wrappedDirectoryReader = wrapper.apply(directoryReader); + IndexSearcher indexSearcher = new ContextIndexSearcher( + wrappedDirectoryReader, + IndexSearcher.getDefaultSimilarity(), + IndexSearcher.getDefaultQueryCache(), + IndexSearcher.getDefaultQueryCachingPolicy(), + true + ); + + ScoreDoc[] hits = indexSearcher.search(new MatchAllDocsQuery(), 1000).scoreDocs; + assertThat(Arrays.stream(hits).map(h -> h.doc).collect(Collectors.toSet()), containsInAnyOrder(4, 5, 6, 7, 11, 12, 13)); + + hits = indexSearcher.search(Queries.newNonNestedFilter(context.indexVersionCreated()), 1000).scoreDocs; + assertThat(Arrays.stream(hits).map(h -> h.doc).collect(Collectors.toSet()), containsInAnyOrder(7, 13)); + + bitsetCache.close(); + directoryReader.close(); + directory.close(); + } + private static MappingLookup createMappingLookup(List concreteFields) { List mappers = concreteFields.stream().map(MockFieldMapper::new).collect(Collectors.toList()); return MappingLookup.fromMappers(Mapping.EMPTY, mappers, emptyList()); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLServiceTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLServiceTests.java index bfac286bc3c35..9663e41a647a8 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLServiceTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLServiceTests.java @@ -614,8 +614,7 @@ public void testGetConfigurationByContextName() throws Exception { "xpack.security.authc.realms.ldap.realm1.ssl", "xpack.security.authc.realms.saml.realm2.ssl", "xpack.monitoring.exporters.mon1.ssl", - "xpack.monitoring.exporters.mon2.ssl", - "xpack.inference.elastic.http.ssl" }; + "xpack.monitoring.exporters.mon2.ssl" }; assumeTrue("Not enough cipher suites are available to support this test", getCipherSuites.length >= contextNames.length); diff --git a/x-pack/plugin/core/template-resources/src/main/resources/entsearch/connector/elastic-connectors-mappings.json b/x-pack/plugin/core/template-resources/src/main/resources/entsearch/connector/elastic-connectors-mappings.json index 651e1c84da73a..5afa557e1405e 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/entsearch/connector/elastic-connectors-mappings.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/entsearch/connector/elastic-connectors-mappings.json @@ -7,7 +7,7 @@ "dynamic": "false", "_meta": { "pipeline": { - "default_name": "ent-search-generic-ingestion", + "default_name": 
"search-default-ingestion", "default_extract_binary_content": true, "default_run_ml_inference": true, "default_reduce_whitespace": true diff --git a/x-pack/plugin/core/template-resources/src/main/resources/entsearch/generic_ingestion_pipeline.json b/x-pack/plugin/core/template-resources/src/main/resources/entsearch/generic_ingestion_pipeline.json deleted file mode 100644 index e2a2cbd460117..0000000000000 --- a/x-pack/plugin/core/template-resources/src/main/resources/entsearch/generic_ingestion_pipeline.json +++ /dev/null @@ -1,130 +0,0 @@ -{ - "version": ${xpack.application.connector.template.version}, - "description": "Generic Enterprise Search ingest pipeline", - "_meta": { - "managed_by": "Enterprise Search", - "managed": true - }, - "processors": [ - { - "attachment": { - "description": "Extract text from binary attachments", - "field": "_attachment", - "target_field": "_extracted_attachment", - "ignore_missing": true, - "indexed_chars_field": "_attachment_indexed_chars", - "if": "ctx?._extract_binary_content == true", - "on_failure": [ - { - "append": { - "description": "Record error information", - "field": "_ingestion_errors", - "value": "Processor 'attachment' in pipeline '{{ _ingest.on_failure_pipeline }}' failed with message '{{ _ingest.on_failure_message }}'" - } - } - ], - "remove_binary": false - } - }, - { - "set": { - "tag": "set_body", - "description": "Set any extracted text on the 'body' field", - "field": "body", - "copy_from": "_extracted_attachment.content", - "ignore_empty_value": true, - "if": "ctx?._extract_binary_content == true", - "on_failure": [ - { - "append": { - "description": "Record error information", - "field": "_ingestion_errors", - "value": "Processor 'set' with tag 'set_body' in pipeline '{{ _ingest.on_failure_pipeline }}' failed with message '{{ _ingest.on_failure_message }}'" - } - } - ] - } - }, - { - "gsub": { - "tag": "remove_replacement_chars", - "description": "Remove unicode 'replacement' characters", - "field": "body", - "pattern": "�", - "replacement": "", - "ignore_missing": true, - "if": "ctx?._extract_binary_content == true", - "on_failure": [ - { - "append": { - "description": "Record error information", - "field": "_ingestion_errors", - "value": "Processor 'gsub' with tag 'remove_replacement_chars' in pipeline '{{ _ingest.on_failure_pipeline }}' failed with message '{{ _ingest.on_failure_message }}'" - } - } - ] - } - }, - { - "gsub": { - "tag": "remove_extra_whitespace", - "description": "Squish whitespace", - "field": "body", - "pattern": "\\s+", - "replacement": " ", - "ignore_missing": true, - "if": "ctx?._reduce_whitespace == true", - "on_failure": [ - { - "append": { - "description": "Record error information", - "field": "_ingestion_errors", - "value": "Processor 'gsub' with tag 'remove_extra_whitespace' in pipeline '{{ _ingest.on_failure_pipeline }}' failed with message '{{ _ingest.on_failure_message }}'" - } - } - ] - } - }, - { - "trim" : { - "description": "Trim leading and trailing whitespace", - "field": "body", - "ignore_missing": true, - "if": "ctx?._reduce_whitespace == true", - "on_failure": [ - { - "append": { - "description": "Record error information", - "field": "_ingestion_errors", - "value": "Processor 'trim' in pipeline '{{ _ingest.on_failure_pipeline }}' failed with message '{{ _ingest.on_failure_message }}'" - } - } - ] - } - }, - { - "remove": { - "tag": "remove_meta_fields", - "description": "Remove meta fields", - "field": [ - "_attachment", - "_attachment_indexed_chars", - "_extracted_attachment", - 
"_extract_binary_content", - "_reduce_whitespace", - "_run_ml_inference" - ], - "ignore_missing": true, - "on_failure": [ - { - "append": { - "description": "Record error information", - "field": "_ingestion_errors", - "value": "Processor 'remove' with tag 'remove_meta_fields' in pipeline '{{ _ingest.on_failure_pipeline }}' failed with message '{{ _ingest.on_failure_message }}'" - } - } - ] - } - } - ] -} diff --git a/x-pack/plugin/core/template-resources/src/main/resources/ml/anomalydetection/results_index_mappings.json b/x-pack/plugin/core/template-resources/src/main/resources/ml/anomalydetection/results_index_mappings.json index 4415afe50a998..e0bde4715839e 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/ml/anomalydetection/results_index_mappings.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/ml/anomalydetection/results_index_mappings.json @@ -5,7 +5,15 @@ }, "dynamic_templates" : [ { - "strings_as_keywords" : { + "map_objects": { + "match_mapping_type": "object", + "mapping": { + "type": "object" + } + } + }, + { + "non_objects_as_keywords" : { "match" : "*", "mapping" : { "type" : "keyword" diff --git a/x-pack/plugin/downsample/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/downsample/70_flattened_field_type.yml b/x-pack/plugin/downsample/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/downsample/70_flattened_field_type.yml new file mode 100644 index 0000000000000..0f586ec0ed669 --- /dev/null +++ b/x-pack/plugin/downsample/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/downsample/70_flattened_field_type.yml @@ -0,0 +1,307 @@ +--- +"A flattened label field": + - do: + indices.create: + index: source_index + body: + settings: + number_of_shards: 1 + index: + mode: time_series + routing_path: [ metricset, k8s.pod.uid ] + time_series: + start_time: 2021-04-28T00:00:00Z + end_time: 2021-04-29T00:00:00Z + mappings: + subobjects: false + properties: + "@timestamp": + type: date + metricset: + type: keyword + time_series_dimension: true + k8s: + properties: + pod: + properties: + uid: + type: keyword + time_series_dimension: true + name: + type: keyword + agent: + type: flattened + value: + type: long + time_series_metric: gauge + + - do: + bulk: + refresh: true + index: source_index + body: + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:50:04.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507"}, "agent": { "id": "first", "version": "2.0.4" }, "value": 10 }}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:50:24.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507"}, "agent": { "id": "first", "version": "2.0.5" }, "value": 20 }}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T20:50:44.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507"}, "agent": { "id": "first", "version": "2.0.6" }, "value": 12 }}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T20:51:04.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507"}, "agent": { "id": "first", "version": "2.0.7" }, "value": 15 }}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:50:03.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9"}, "agent": { "id": "second", "version": "2.1.7" }, "value": 9 }}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:50:23.142Z", "metricset": "pod", "k8s": {"pod": {"name": 
"dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9"}, "agent": { "id": "second", "version": "2.1.8" }, "value": 16 }}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T19:50:53.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9"}, "agent": { "id": "second", "version": "2.1.9" }, "value": 25 }}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T19:51:03.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9"}, "agent": { "id": "second", "version": "2.1.10" }, "value": 17 }}' + + - do: + indices.put_settings: + index: source_index + body: + index.blocks.write: true + + - do: + indices.downsample: + index: source_index + target_index: target_index + body: > + { + "fixed_interval": "1h" + } + - is_true: acknowledged + + - do: + search: + index: target_index + body: + sort: [ "_tsid", "@timestamp" ] + + - length: { hits.hits: 4 } + - match: { hits.hits.0._source._doc_count: 2 } + - match: { hits.hits.0._source.k8s\.pod\.uid: df3145b3-0563-4d3b-a0f7-897eb2876ea9 } + - match: { hits.hits.0._source.@timestamp: 2021-04-28T18:00:00.000Z } + - match: { hits.hits.0._source.k8s\.agent: { "id": "second", "version": "2.1.8" } } + + - match: { hits.hits.1._source._doc_count: 2 } + - match: { hits.hits.1._source.k8s\.pod\.uid: df3145b3-0563-4d3b-a0f7-897eb2876ea9 } + - match: { hits.hits.1._source.@timestamp: 2021-04-28T19:00:00.000Z } + - match: { hits.hits.1._source.k8s\.agent: { "id": "second", "version": "2.1.10" } } + + - match: { hits.hits.2._source._doc_count: 2 } + - match: { hits.hits.2._source.k8s\.pod\.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } + - match: { hits.hits.2._source.@timestamp: 2021-04-28T18:00:00.000Z } + - match: { hits.hits.2._source.k8s\.agent: { "id": "first", "version": "2.0.5" } } + + - match: { hits.hits.3._source._doc_count: 2 } + - match: { hits.hits.3._source.k8s\.pod\.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } + - match: { hits.hits.3._source.@timestamp: 2021-04-28T20:00:00.000Z } + - match: { hits.hits.3._source.k8s\.agent: { "id": "first", "version": "2.0.7" } } + +--- +"A flattened label field with no doc values": + - do: + indices.create: + index: source_index + body: + settings: + number_of_shards: 1 + index: + mode: time_series + routing_path: [ metricset, k8s.pod.uid ] + time_series: + start_time: 2021-04-28T00:00:00Z + end_time: 2021-04-29T00:00:00Z + mappings: + subobjects: false + properties: + "@timestamp": + type: date + metricset: + type: keyword + time_series_dimension: true + k8s: + properties: + pod: + properties: + uid: + type: keyword + time_series_dimension: true + name: + type: keyword + agent: + type: flattened + doc_values: false + value: + type: long + time_series_metric: gauge + + - do: + bulk: + refresh: true + index: source_index + body: + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:50:04.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507"}, "agent": { "id": "first", "version": "2.0.4" }, "value": 10 }}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:50:24.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507"}, "agent": { "id": "first", "version": "2.0.5" }, "value": 20 }}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T20:50:44.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507"}, "agent": { "id": "first", "version": "2.0.6" }, "value": 12 }}' + - '{"index": {}}' + - 
'{"@timestamp": "2021-04-28T20:51:04.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507"}, "agent": { "id": "first", "version": "2.0.7" }, "value": 15 }}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:50:03.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9"}, "agent": { "id": "second", "version": "2.1.7" }, "value": 9 }}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:50:23.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9"}, "agent": { "id": "second", "version": "2.1.8" }, "value": 16 }}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T19:50:53.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9"}, "agent": { "id": "second", "version": "2.1.9" }, "value": 25 }}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T19:51:03.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9"}, "agent": { "id": "second", "version": "2.1.10" }, "value": 17 }}' + + - do: + indices.put_settings: + index: source_index + body: + index.blocks.write: true + + - do: + indices.downsample: + index: source_index + target_index: target_index + body: > + { + "fixed_interval": "1h" + } + - is_true: acknowledged + + - do: + search: + index: target_index + body: + sort: [ "_tsid", "@timestamp" ] + + - length: { hits.hits: 4 } + - match: { hits.hits.0._source._doc_count: 2 } + - match: { hits.hits.0._source.k8s\.pod\.uid: df3145b3-0563-4d3b-a0f7-897eb2876ea9 } + - match: { hits.hits.0._source.@timestamp: 2021-04-28T18:00:00.000Z } + - is_false: hits.hits.0._source.k8s\.agent + + - match: { hits.hits.1._source._doc_count: 2 } + - match: { hits.hits.1._source.k8s\.pod\.uid: df3145b3-0563-4d3b-a0f7-897eb2876ea9 } + - match: { hits.hits.1._source.@timestamp: 2021-04-28T19:00:00.000Z } + - is_false: hits.hits.1._source.k8s\.agent + + - match: { hits.hits.2._source._doc_count: 2 } + - match: { hits.hits.2._source.k8s\.pod\.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } + - match: { hits.hits.2._source.@timestamp: 2021-04-28T18:00:00.000Z } + - is_false: hits.hits.2._source.k8s\.agent + + - match: { hits.hits.3._source._doc_count: 2 } + - match: { hits.hits.3._source.k8s\.pod\.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } + - match: { hits.hits.3._source.@timestamp: 2021-04-28T20:00:00.000Z } + - is_false: hits.hits.3._source.k8s\.agent + +--- +"A flattened label field with mixed content": + - do: + indices.create: + index: source_index + body: + settings: + number_of_shards: 1 + index: + mode: time_series + routing_path: [ metricset, k8s.pod.uid ] + time_series: + start_time: 2021-04-28T00:00:00Z + end_time: 2021-04-29T00:00:00Z + mappings: + subobjects: false + properties: + "@timestamp": + type: date + metricset: + type: keyword + time_series_dimension: true + k8s: + properties: + pod: + properties: + uid: + type: keyword + time_series_dimension: true + name: + type: keyword + agent: + type: flattened + null_value: my_null_value + value: + type: long + time_series_metric: gauge + + - do: + bulk: + refresh: true + index: source_index + body: + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:50:04.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507"}, "agent": { "id": "first", "version": "2.0.4", "versions": ["1", "2", "3"], "optional_version": null, "dotted.version": "1.1", "numeric_version": 11 }, "value": 
10 }}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:50:24.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507"}, "agent": { "id": "first", "version": "2.0.5", "versions": ["1", "2", "3"], "optional_version": null, "dotted.version": "1.1", "numeric_version": 11}, "value": 20 }}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T20:50:44.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507"}, "agent": { "id": "first", "version": "2.0.6", "versions": ["1", "2", "3"], "optional_version": null, "dotted.version": "1.1", "numeric_version": 11}, "value": 12 }}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T20:51:04.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507"}, "agent": { "id": "first", "version": "2.0.7", "versions": ["1", "2", "3"], "optional_version": null, "dotted.version": "1.1", "numeric_version": 11}, "value": 15 }}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:50:03.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9"}, "agent": { "id": "second", "version": "2.1.7", "versions": ["1", "2", "3"], "optional_version": null, "dotted.version": "1.1", "numeric_version": 11}, "value": 9 }}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:50:23.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9"}, "agent": { "id": "second", "version": "2.1.8", "versions": ["1", "2", "3"], "optional_version": null, "dotted.version": "1.1", "numeric_version": 11}, "value": 16 }}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T19:50:53.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9"}, "agent": { "id": "second", "version": "2.1.9", "versions": ["1", "2", "3"], "optional_version": null, "dotted.version": "1.1", "numeric_version": 11}, "value": 25 }}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T19:51:03.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9"}, "agent": { "id": "second", "version": "2.1.10", "versions": ["1", "2", "3"], "optional_version": null, "dotted.version": "1.1", "numeric_version": 11}, "value": 17 }}' + + - do: + indices.put_settings: + index: source_index + body: + index.blocks.write: true + + - do: + indices.downsample: + index: source_index + target_index: target_index + body: > + { + "fixed_interval": "1h" + } + - is_true: acknowledged + + - do: + search: + index: target_index + body: + sort: [ "_tsid", "@timestamp" ] + + - length: { hits.hits: 4 } + - match: { hits.hits.0._source._doc_count: 2 } + - match: { hits.hits.0._source.k8s\.pod\.uid: df3145b3-0563-4d3b-a0f7-897eb2876ea9 } + - match: { hits.hits.0._source.@timestamp: 2021-04-28T18:00:00.000Z } + - match: { hits.hits.0._source.k8s\.agent: { "id": "second", "version": "2.1.8", "versions": ["1", "2", "3"], "dotted": {"version": "1.1"}, "numeric_version": "11", optional_version: "my_null_value" } } + + - match: { hits.hits.1._source._doc_count: 2 } + - match: { hits.hits.1._source.k8s\.pod\.uid: df3145b3-0563-4d3b-a0f7-897eb2876ea9 } + - match: { hits.hits.1._source.@timestamp: 2021-04-28T19:00:00.000Z } + - match: { hits.hits.1._source.k8s\.agent: { "id": "second", "version": "2.1.10", "versions": ["1", "2", "3"], "dotted": {"version": "1.1"}, "numeric_version": "11", optional_version: "my_null_value" } } + + - match: { 
hits.hits.2._source._doc_count: 2 } + - match: { hits.hits.2._source.k8s\.pod\.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } + - match: { hits.hits.2._source.@timestamp: 2021-04-28T18:00:00.000Z } + - match: { hits.hits.2._source.k8s\.agent: { "id": "first", "version": "2.0.5", "versions": ["1", "2", "3"], "dotted": {"version": "1.1"}, "numeric_version": "11", optional_version: "my_null_value" } } + + - match: { hits.hits.3._source._doc_count: 2 } + - match: { hits.hits.3._source.k8s\.pod\.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } + - match: { hits.hits.3._source.@timestamp: 2021-04-28T20:00:00.000Z } + - match: { hits.hits.3._source.k8s\.agent: { "id": "first", "version": "2.0.7", "versions": ["1", "2", "3"], "dotted": {"version": "1.1"}, "numeric_version": "11", optional_version: "my_null_value" } } diff --git a/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/FieldValueFetcher.java b/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/FieldValueFetcher.java index 74375bbe27939..3657e4989ccbd 100644 --- a/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/FieldValueFetcher.java +++ b/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/FieldValueFetcher.java @@ -12,6 +12,7 @@ import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.NumberFieldMapper; +import org.elasticsearch.index.mapper.flattened.FlattenedFieldMapper; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.xpack.aggregatemetric.mapper.AggregateDoubleMetricFieldMapper; @@ -65,6 +66,8 @@ private AbstractDownsampleFieldProducer createFieldProducer() { // If field is not a metric, we downsample it as a label if ("histogram".equals(fieldType.typeName())) { return new LabelFieldProducer.HistogramLastLabelFieldProducer(name()); + } else if ("flattened".equals(fieldType.typeName())) { + return new LabelFieldProducer.FlattenedLastValueFieldProducer(name()); } return new LabelFieldProducer.LabelLastValueFieldProducer(name()); } @@ -90,7 +93,13 @@ static List create(SearchExecutionContext context, String[] f } } else { if (context.fieldExistsInIndex(field)) { - final IndexFieldData fieldData = context.getForField(fieldType, MappedFieldType.FielddataOperation.SEARCH); + final IndexFieldData fieldData; + if (fieldType instanceof FlattenedFieldMapper.RootFlattenedFieldType flattenedFieldType) { + var keyedFieldType = flattenedFieldType.getKeyedFieldType(); + fieldData = context.getForField(keyedFieldType, MappedFieldType.FielddataOperation.SEARCH); + } else { + fieldData = context.getForField(fieldType, MappedFieldType.FielddataOperation.SEARCH); + } final String fieldName = context.isMultiField(field) ? 
fieldType.name().substring(0, fieldType.name().lastIndexOf('.')) : fieldType.name(); diff --git a/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/LabelFieldProducer.java b/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/LabelFieldProducer.java index 05b4852d0dfd3..b211c5bfb0d12 100644 --- a/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/LabelFieldProducer.java +++ b/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/LabelFieldProducer.java @@ -7,8 +7,10 @@ package org.elasticsearch.xpack.downsample; +import org.apache.lucene.util.BytesRef; import org.elasticsearch.index.fielddata.FormattedDocValues; import org.elasticsearch.index.fielddata.HistogramValue; +import org.elasticsearch.index.mapper.flattened.FlattenedFieldSyntheticWriterHelper; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xpack.aggregatemetric.mapper.AggregateDoubleMetricFieldMapper.Metric; @@ -141,14 +143,14 @@ public void reset() { } } - static class AggregateMetricFieldProducer extends LabelLastValueFieldProducer { + static final class AggregateMetricFieldProducer extends LabelLastValueFieldProducer { AggregateMetricFieldProducer(String name, Metric metric) { super(name, new LastValueLabel(metric.name())); } } - public static class HistogramLastLabelFieldProducer extends LabelLastValueFieldProducer { + static final class HistogramLastLabelFieldProducer extends LabelLastValueFieldProducer { HistogramLastLabelFieldProducer(String name) { super(name); } @@ -167,4 +169,40 @@ public void write(XContentBuilder builder) throws IOException { } } } + + static final class FlattenedLastValueFieldProducer extends LabelLastValueFieldProducer { + + FlattenedLastValueFieldProducer(String name) { + super(name); + } + + @Override + public void write(XContentBuilder builder) throws IOException { + if (isEmpty() == false) { + builder.startObject(name()); + + var value = label.get(); + List list; + if (value instanceof Object[] values) { + list = new ArrayList<>(values.length); + for (Object v : values) { + list.add(new BytesRef(v.toString())); + } + } else { + list = List.of(new BytesRef(value.toString())); + } + + var iterator = list.iterator(); + var helper = new FlattenedFieldSyntheticWriterHelper(() -> { + if (iterator.hasNext()) { + return iterator.next(); + } else { + return null; + } + }); + helper.write(builder); + builder.endObject(); + } + } + } } diff --git a/x-pack/plugin/downsample/src/test/java/org/elasticsearch/xpack/downsample/LabelFieldProducerTests.java b/x-pack/plugin/downsample/src/test/java/org/elasticsearch/xpack/downsample/LabelFieldProducerTests.java index 469e00f7af9af..844eb1b8e27d8 100644 --- a/x-pack/plugin/downsample/src/test/java/org/elasticsearch/xpack/downsample/LabelFieldProducerTests.java +++ b/x-pack/plugin/downsample/src/test/java/org/elasticsearch/xpack/downsample/LabelFieldProducerTests.java @@ -7,10 +7,18 @@ package org.elasticsearch.xpack.downsample; +import org.elasticsearch.common.Strings; import org.elasticsearch.index.fielddata.FormattedDocValues; import org.elasticsearch.search.aggregations.AggregatorTestCase; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentType; +import java.io.ByteArrayOutputStream; import java.io.IOException; +import java.util.Iterator; +import java.util.List; + +import static org.hamcrest.Matchers.equalTo; public class LabelFieldProducerTests extends AggregatorTestCase { @@ -93,4 +101,50 @@ public 
Object nextValue() { assertTrue(producer.isEmpty()); assertNull(producer.label().get()); } + + public void testFlattenedLastValueFieldProducer() throws IOException { + var producer = new LabelFieldProducer.FlattenedLastValueFieldProducer("dummy"); + assertTrue(producer.isEmpty()); + assertEquals("dummy", producer.name()); + assertEquals("last_value", producer.label().name()); + + var bytes = List.of("a\0value_a", "b\0value_b", "c\0value_c", "d\0value_d"); + var docValues = new FormattedDocValues() { + + Iterator iterator = bytes.iterator(); + + @Override + public boolean advanceExact(int docId) { + return true; + } + + @Override + public int docValueCount() { + return bytes.size(); + } + + @Override + public Object nextValue() { + return iterator.next(); + } + }; + + producer.collect(docValues, 1); + assertFalse(producer.isEmpty()); + assertEquals("a\0value_a", (((Object[]) producer.label().get())[0]).toString()); + assertEquals("b\0value_b", (((Object[]) producer.label().get())[1]).toString()); + assertEquals("c\0value_c", (((Object[]) producer.label().get())[2]).toString()); + assertEquals("d\0value_d", (((Object[]) producer.label().get())[3]).toString()); + + var builder = new XContentBuilder(XContentType.JSON.xContent(), new ByteArrayOutputStream()); + builder.startObject(); + producer.write(builder); + builder.endObject(); + var content = Strings.toString(builder); + assertThat(content, equalTo("{\"dummy\":{\"a\":\"value_a\",\"b\":\"value_b\",\"c\":\"value_c\",\"d\":\"value_d\"}}")); + + producer.reset(); + assertTrue(producer.isEmpty()); + assertNull(producer.label().get()); + } } diff --git a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/connector/10_connector_put.yml b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/connector/10_connector_put.yml index 094d9cbf43089..4240467ea4ff3 100644 --- a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/connector/10_connector_put.yml +++ b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/connector/10_connector_put.yml @@ -152,6 +152,19 @@ setup: service_type: super-connector +--- +'Create Connector - Invalid Managed Connector Index Prefix': + - do: + catch: "bad_request" + connector.put: + connector_id: test-connector-test-managed + body: + index_name: wrong-prefix-index + name: my-connector + language: pl + is_native: true + service_type: super-connector + --- 'Create Connector - Id returned as part of response': - do: diff --git a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/connector/130_connector_update_index_name.yml b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/connector/130_connector_update_index_name.yml index f804dc02a9e01..b63bf595af5f4 100644 --- a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/connector/130_connector_update_index_name.yml +++ b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/connector/130_connector_update_index_name.yml @@ -151,3 +151,18 @@ setup: - match: { index_name: content-search-2-test } +--- +"Update Managed Connector Index Name - Bad Prefix": + - do: + connector.put: + connector_id: test-connector-2 + body: + is_native: true + service_type: super-connector + + - do: + catch: "bad_request" + connector.update_index_name: + connector_id: test-connector-2 + body: + index_name: 
wrong-prefix-search-2-test diff --git a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/connector/140_connector_update_native.yml b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/connector/140_connector_update_native.yml index f8cd24d175312..6811c3340ce42 100644 --- a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/connector/140_connector_update_native.yml +++ b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/connector/140_connector_update_native.yml @@ -73,3 +73,43 @@ setup: field_1: test field_2: something +--- +"Update Connector Native - changing connector to Elastic-managed wrong index name": + + - do: + connector.put: + connector_id: test-connector-1 + body: + is_native: false + index_name: super-connector + + - do: + catch: "bad_request" + connector.update_native: + connector_id: test-connector-1 + body: + is_native: true + +--- +"Update Connector Native - changing connector to Elastic-managed correct index name": + + - do: + connector.put: + connector_id: test-connector-1 + body: + is_native: false + index_name: content-super-connector + + - do: + connector.update_native: + connector_id: test-connector-1 + body: + is_native: true + + - match: { result: updated } + + - do: + connector.get: + connector_id: test-connector-1 + + - match: { is_native: true } diff --git a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/connector/15_connector_post.yml b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/connector/15_connector_post.yml index 634f99cd53fde..4acca493c42c8 100644 --- a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/connector/15_connector_post.yml +++ b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/connector/15_connector_post.yml @@ -103,6 +103,18 @@ setup: service_type: super-connector +--- +'Create Connector - Invalid Managed Connector Index Prefix': + - do: + catch: "bad_request" + connector.post: + body: + index_name: wrong-prefix-index + name: my-connector + language: pl + is_native: true + service_type: super-connector + --- 'Create Connector - Index name used by another connector': - do: diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorIndexService.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorIndexService.java index 5e1fde0dfb942..d5d2159d8f373 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorIndexService.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorIndexService.java @@ -76,6 +76,7 @@ import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.xpack.application.connector.ConnectorFiltering.fromXContentBytesConnectorFiltering; import static org.elasticsearch.xpack.application.connector.ConnectorFiltering.sortFilteringRulesByOrder; +import static org.elasticsearch.xpack.application.connector.ConnectorTemplateRegistry.MANAGED_CONNECTOR_INDEX_PREFIX; /** * A service that manages persistent {@link Connector} configurations. 
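Both hunks below enforce the same invariant: an index attached to an Elastic-managed (is_native) connector must be named with MANAGED_CONNECTOR_INDEX_PREFIX ("content-"). As a hedged sketch only, with the helper name invented here for illustration (the shipped code spreads the check across isValidManagedConnectorIndexName and per-call error messages), the rule boils down to:

    // Illustrative helper only; mirrors the prefix checks in the hunks below.
    static void requireManagedIndexPrefix(String indexName, boolean isNative) {
        // A null index name means no index is attached yet, so there is nothing to validate.
        if (isNative && indexName != null && indexName.startsWith("content-") == false) {
            throw new IllegalArgumentException(
                "Index [" + indexName + "] must start with [content-] to be attached to an Elastic-managed connector"
            );
        }
    }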
@@ -807,8 +808,8 @@ public void updateConnectorLastSyncStats(UpdateConnectorLastSyncStatsAction.Requ } /** - * Updates the is_native property of a {@link Connector}. It always sets the {@link ConnectorStatus} to - * CONFIGURED. + * Updates the is_native property of a {@link Connector}. It sets the {@link ConnectorStatus} to + * CONFIGURED when the connector is in the CONNECTED state to indicate that the connector needs to reconnect. * * @param request The request for updating the connector's is_native property. * @param listener The listener for handling responses, including successful updates or errors. @@ -816,29 +817,62 @@ public void updateConnectorLastSyncStats(UpdateConnectorLastSyncStatsAction.Requ public void updateConnectorNative(UpdateConnectorNativeAction.Request request, ActionListener<UpdateResponse> listener) { try { String connectorId = request.getConnectorId(); + boolean isNative = request.isNative(); - final UpdateRequest updateRequest = new UpdateRequest(CONNECTOR_INDEX_NAME, connectorId).doc( - new IndexRequest(CONNECTOR_INDEX_NAME).opType(DocWriteRequest.OpType.INDEX) - .id(connectorId) - .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) - .source( - Map.of( - Connector.IS_NATIVE_FIELD.getPreferredName(), - request.isNative(), - Connector.STATUS_FIELD.getPreferredName(), - ConnectorStatus.CONFIGURED.toString() - ) - ) + getConnector(connectorId, listener.delegateFailure((l, connector) -> { - ); - client.update(updateRequest, new DelegatingIndexNotFoundActionListener<>(connectorId, listener, (l, updateResponse) -> { - if (updateResponse.getResult() == UpdateResponse.Result.NOT_FOUND) { - l.onFailure(new ResourceNotFoundException(connectorNotFoundErrorMsg(connectorId))); + String indexName = getConnectorIndexNameFromSearchResult(connector); + + boolean doesNotHaveContentPrefix = indexName != null && isValidManagedConnectorIndexName(indexName) == false; + // Ensure attached content index is prefixed correctly + if (isNative && doesNotHaveContentPrefix) { + l.onFailure( + new ElasticsearchStatusException( + "The index name [" + + indexName + + "] attached to the connector [" + + connectorId + + "] must start with the required prefix: [" + + MANAGED_CONNECTOR_INDEX_PREFIX + + "] to be Elastic-managed.
Please update the attached index first to comply with this requirement.", + RestStatus.BAD_REQUEST + ) + ); return; } - l.onResponse(updateResponse); - })); + ConnectorStatus status = getConnectorStatusFromSearchResult(connector); + + // If connector was connected already, change its status to CONFIGURED as we need to re-connect + boolean isConnected = status == ConnectorStatus.CONNECTED; + boolean isValidTransitionToConfigured = ConnectorStateMachine.isValidTransition(status, ConnectorStatus.CONFIGURED); + if (isConnected && isValidTransitionToConfigured) { + status = ConnectorStatus.CONFIGURED; + } + + final UpdateRequest updateRequest = new UpdateRequest(CONNECTOR_INDEX_NAME, connectorId).setRefreshPolicy( + WriteRequest.RefreshPolicy.IMMEDIATE + ) + .doc( + new IndexRequest(CONNECTOR_INDEX_NAME).opType(DocWriteRequest.OpType.INDEX) + .id(connectorId) + .source( + Map.of( + Connector.IS_NATIVE_FIELD.getPreferredName(), + isNative, + Connector.STATUS_FIELD.getPreferredName(), + status.toString() + ) + ) + ); + client.update(updateRequest, new DelegatingIndexNotFoundActionListener<>(connectorId, listener, (ll, updateResponse) -> { + if (updateResponse.getResult() == UpdateResponse.Result.NOT_FOUND) { + ll.onFailure(new ResourceNotFoundException(connectorNotFoundErrorMsg(connectorId))); + return; + } + ll.onResponse(updateResponse); + })); + })); } catch (Exception e) { listener.onFailure(e); } @@ -896,22 +930,45 @@ public void updateConnectorIndexName(UpdateConnectorIndexNameAction.Request requ return; } - final UpdateRequest updateRequest = new UpdateRequest(CONNECTOR_INDEX_NAME, connectorId).doc( - new IndexRequest(CONNECTOR_INDEX_NAME).opType(DocWriteRequest.OpType.INDEX) - .id(connectorId) - .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) - .source(new HashMap<>() { - { - put(Connector.INDEX_NAME_FIELD.getPreferredName(), request.getIndexName()); - } - }) - ); - client.update(updateRequest, new DelegatingIndexNotFoundActionListener<>(connectorId, listener, (ll, updateResponse) -> { - if (updateResponse.getResult() == UpdateResponse.Result.NOT_FOUND) { - ll.onFailure(new ResourceNotFoundException(connectorNotFoundErrorMsg(connectorId))); + getConnector(connectorId, l.delegateFailure((ll, connector) -> { + + Boolean isNativeConnector = getConnectorIsNativeFlagFromSearchResult(connector); + Boolean doesNotHaveContentPrefix = indexName != null && isValidManagedConnectorIndexName(indexName) == false; + + if (isNativeConnector && doesNotHaveContentPrefix) { + ll.onFailure( + new ElasticsearchStatusException( + "Index attached to an Elastic-managed connector must start with the prefix: [" + + MANAGED_CONNECTOR_INDEX_PREFIX + + "]. 
The index name in the payload [" + + indexName + + "] doesn't comply with this requirement.", + RestStatus.BAD_REQUEST + ) + ); return; } - ll.onResponse(updateResponse); + + final UpdateRequest updateRequest = new UpdateRequest(CONNECTOR_INDEX_NAME, connectorId).doc( + new IndexRequest(CONNECTOR_INDEX_NAME).opType(DocWriteRequest.OpType.INDEX) + .id(connectorId) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .source(new HashMap<>() { + { + put(Connector.INDEX_NAME_FIELD.getPreferredName(), request.getIndexName()); + } + }) + ); + client.update( + updateRequest, + new DelegatingIndexNotFoundActionListener<>(connectorId, listener, (lll, updateResponse) -> { + if (updateResponse.getResult() == UpdateResponse.Result.NOT_FOUND) { + lll.onFailure(new ResourceNotFoundException(connectorNotFoundErrorMsg(connectorId))); + return; + } + lll.onResponse(updateResponse); + }) + ); })); })); @@ -1064,6 +1121,18 @@ private ConnectorStatus getConnectorStatusFromSearchResult(ConnectorSearchResult return ConnectorStatus.connectorStatus((String) searchResult.getResultMap().get(Connector.STATUS_FIELD.getPreferredName())); } + private Boolean getConnectorIsNativeFlagFromSearchResult(ConnectorSearchResult searchResult) { + return (Boolean) searchResult.getResultMap().get(Connector.IS_NATIVE_FIELD.getPreferredName()); + } + + private String getConnectorIndexNameFromSearchResult(ConnectorSearchResult searchResult) { + return (String) searchResult.getResultMap().get(Connector.INDEX_NAME_FIELD.getPreferredName()); + } + + private boolean isValidManagedConnectorIndexName(String indexName) { + return indexName.startsWith(MANAGED_CONNECTOR_INDEX_PREFIX); + } + @SuppressWarnings("unchecked") private Map getConnectorConfigurationFromSearchResult(ConnectorSearchResult searchResult) { return (Map) searchResult.getResultMap().get(Connector.CONFIGURATION_FIELD.getPreferredName()); diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorTemplateRegistry.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorTemplateRegistry.java index 9b8cc7cfdbe4f..fd35acc89db5c 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorTemplateRegistry.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorTemplateRegistry.java @@ -45,11 +45,9 @@ public class ConnectorTemplateRegistry extends IndexTemplateRegistry { public static final String ACCESS_CONTROL_INDEX_NAME_PATTERN = ".search-acl-filter-*"; public static final String ACCESS_CONTROL_TEMPLATE_NAME = "search-acl-filter"; - // Pipeline constants - - public static final String ENT_SEARCH_GENERIC_PIPELINE_NAME = "ent-search-generic-ingestion"; - public static final String ENT_SEARCH_GENERIC_PIPELINE_FILE = "generic_ingestion_pipeline"; + public static final String MANAGED_CONNECTOR_INDEX_PREFIX = "content-"; + // Pipeline constants public static final String SEARCH_DEFAULT_PIPELINE_NAME = "search-default-ingestion"; public static final String SEARCH_DEFAULT_PIPELINE_FILE = "search_default_pipeline"; @@ -109,12 +107,6 @@ public class ConnectorTemplateRegistry extends IndexTemplateRegistry { @Override protected List getIngestPipelines() { return List.of( - new JsonIngestPipelineConfig( - ENT_SEARCH_GENERIC_PIPELINE_NAME, - ROOT_RESOURCE_PATH + ENT_SEARCH_GENERIC_PIPELINE_FILE + ".json", - REGISTRY_VERSION, - TEMPLATE_VERSION_VARIABLE - ), new JsonIngestPipelineConfig( 
SEARCH_DEFAULT_PIPELINE_NAME, ROOT_RESOURCE_PATH + SEARCH_DEFAULT_PIPELINE_FILE + ".json", diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/ConnectorActionRequest.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/ConnectorActionRequest.java index 1799121505da5..66f347bc4dbb4 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/ConnectorActionRequest.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/ConnectorActionRequest.java @@ -19,6 +19,7 @@ import java.io.IOException; import static org.elasticsearch.action.ValidateActions.addValidationError; +import static org.elasticsearch.xpack.application.connector.ConnectorTemplateRegistry.MANAGED_CONNECTOR_INDEX_PREFIX; /** * Abstract base class for action requests targeting the connectors index. Implements {@link org.elasticsearch.action.IndicesRequest} @@ -52,6 +53,32 @@ public ActionRequestValidationException validateIndexName(String indexName, Acti return validationException; } + /** + * Validates that the given index name starts with the required prefix for Elastic-managed connectors. + * If the index name does not start with the required prefix, the validation exception is updated with an error message. + * + * @param indexName The index name to validate. If null, no validation is performed. + * @param validationException The exception to accumulate validation errors. + * @return The updated or original {@code validationException} with any new validation errors added, + * if the index name does not start with the required prefix. + */ + public ActionRequestValidationException validateManagedConnectorIndexPrefix( + String indexName, + ActionRequestValidationException validationException + ) { + if (indexName != null && indexName.startsWith(MANAGED_CONNECTOR_INDEX_PREFIX) == false) { + return addValidationError( + "Index [" + + indexName + + "] is invalid. 
Index attached to an Elastic-managed connector must start with the prefix: [" + + MANAGED_CONNECTOR_INDEX_PREFIX + + "]", + validationException + ); + } + return validationException; + } + @Override public String[] indices() { return new String[] { ConnectorTemplateRegistry.CONNECTOR_INDEX_NAME_PATTERN }; diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/PostConnectorAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/PostConnectorAction.java index fad349cd31877..b1c38637298c4 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/PostConnectorAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/PostConnectorAction.java @@ -127,6 +127,10 @@ public ActionRequestValidationException validate() { validationException = validateIndexName(indexName, validationException); + if (Boolean.TRUE.equals(isNative)) { + validationException = validateManagedConnectorIndexPrefix(indexName, validationException); + } + return validationException; } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/PutConnectorAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/PutConnectorAction.java index 687a801ab8fd6..f3e8ed6b6e76d 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/PutConnectorAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/PutConnectorAction.java @@ -147,6 +147,10 @@ public ActionRequestValidationException validate() { validationException = validateIndexName(indexName, validationException); + if (Boolean.TRUE.equals(isNative)) { + validationException = validateManagedConnectorIndexPrefix(indexName, validationException); + } + return validationException; } diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorIndexServiceTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorIndexServiceTests.java index 12abca3a78591..28d4fe0956d03 100644 --- a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorIndexServiceTests.java +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorIndexServiceTests.java @@ -56,6 +56,7 @@ import java.util.stream.Collectors; import java.util.stream.IntStream; +import static org.elasticsearch.xpack.application.connector.ConnectorTemplateRegistry.MANAGED_CONNECTOR_INDEX_PREFIX; import static org.elasticsearch.xpack.application.connector.ConnectorTestUtils.getRandomConnectorFeatures; import static org.elasticsearch.xpack.application.connector.ConnectorTestUtils.getRandomCronExpression; import static org.elasticsearch.xpack.application.connector.ConnectorTestUtils.randomConnectorFeatureEnabled; @@ -648,8 +649,8 @@ public void testUpdateConnectorScheduling_OnlyFullSchedule() throws Exception { assertThat(initialScheduling.getIncremental(), equalTo(indexedConnector.getScheduling().getIncremental())); } - public void testUpdateConnectorIndexName() throws Exception { - Connector connector = ConnectorTestUtils.getRandomConnector(); + public void testUpdateConnectorIndexName_ForSelfManagedConnector() throws Exception { + Connector connector = 
ConnectorTestUtils.getRandomSelfManagedConnector(); String connectorId = randomUUID(); ConnectorCreateActionResponse resp = awaitCreateConnector(connectorId, connector); @@ -669,8 +670,8 @@ public void testUpdateConnectorIndexName() throws Exception { assertThat(newIndexName, equalTo(indexedConnector.getIndexName())); } - public void testUpdateConnectorIndexName_WithTheSameIndexName() throws Exception { - Connector connector = ConnectorTestUtils.getRandomConnector(); + public void testUpdateConnectorIndexName_ForSelfManagedConnector_WithTheSameIndexName() throws Exception { + Connector connector = ConnectorTestUtils.getRandomSelfManagedConnector(); String connectorId = randomUUID(); ConnectorCreateActionResponse resp = awaitCreateConnector(connectorId, connector); @@ -685,6 +686,42 @@ public void testUpdateConnectorIndexName_WithTheSameIndexName() throws Exception assertThat(updateResponse.getResult(), equalTo(DocWriteResponse.Result.NOOP)); } + public void testUpdateConnectorIndexName_ForManagedConnector_WithIllegalIndexName() throws Exception { + Connector connector = ConnectorTestUtils.getRandomElasticManagedConnector(); + String connectorId = randomUUID(); + + ConnectorCreateActionResponse resp = awaitCreateConnector(connectorId, connector); + assertThat(resp.status(), anyOf(equalTo(RestStatus.CREATED), equalTo(RestStatus.OK))); + + UpdateConnectorIndexNameAction.Request updateIndexNameRequest = new UpdateConnectorIndexNameAction.Request( + connectorId, + "wrong-prefix-" + randomAlphaOfLengthBetween(3, 10) + ); + + expectThrows(ElasticsearchStatusException.class, () -> awaitUpdateConnectorIndexName(updateIndexNameRequest)); + } + + public void testUpdateConnectorIndexName_ForManagedConnector_WithPrefixedIndexName() throws Exception { + Connector connector = ConnectorTestUtils.getRandomElasticManagedConnector(); + String connectorId = randomUUID(); + + ConnectorCreateActionResponse resp = awaitCreateConnector(connectorId, connector); + assertThat(resp.status(), anyOf(equalTo(RestStatus.CREATED), equalTo(RestStatus.OK))); + + String newIndexName = MANAGED_CONNECTOR_INDEX_PREFIX + randomAlphaOfLengthBetween(3, 10); + + UpdateConnectorIndexNameAction.Request updateIndexNameRequest = new UpdateConnectorIndexNameAction.Request( + connectorId, + newIndexName + ); + + DocWriteResponse updateResponse = awaitUpdateConnectorIndexName(updateIndexNameRequest); + assertThat(updateResponse.status(), equalTo(RestStatus.OK)); + + Connector indexedConnector = awaitGetConnector(connectorId); + assertThat(newIndexName, equalTo(indexedConnector.getIndexName())); + } + public void testUpdateConnectorServiceType() throws Exception { Connector connector = ConnectorTestUtils.getRandomConnector(); String connectorId = randomUUID(); @@ -756,7 +793,7 @@ public void testUpdateConnectorNameOrDescription() throws Exception { } public void testUpdateConnectorNative() throws Exception { - Connector connector = ConnectorTestUtils.getRandomConnector(); + Connector connector = ConnectorTestUtils.getRandomConnectorWithDetachedIndex(); String connectorId = randomUUID(); ConnectorCreateActionResponse resp = awaitCreateConnector(connectorId, connector); @@ -773,6 +810,39 @@ public void testUpdateConnectorNative() throws Exception { assertThat(isNative, equalTo(indexedConnector.isNative())); } + public void testUpdateConnectorNativeTrue_WhenIllegalIndexPrefix() throws Exception { + Connector connector = ConnectorTestUtils.getRandomConnectorWithAttachedIndex("wrong-prefix-" + randomAlphaOfLength(10)); + String connectorId = 
randomUUID(); + + ConnectorCreateActionResponse resp = awaitCreateConnector(connectorId, connector); + assertThat(resp.status(), anyOf(equalTo(RestStatus.CREATED), equalTo(RestStatus.OK))); + + boolean isNative = true; + + UpdateConnectorNativeAction.Request updateNativeRequest = new UpdateConnectorNativeAction.Request(connectorId, isNative); + + expectThrows(ElasticsearchStatusException.class, () -> awaitUpdateConnectorNative(updateNativeRequest)); + } + + public void testUpdateConnectorNativeTrue_WithCorrectIndexPrefix() throws Exception { + Connector connector = ConnectorTestUtils.getRandomConnectorWithAttachedIndex( + MANAGED_CONNECTOR_INDEX_PREFIX + randomAlphaOfLength(10) + ); + String connectorId = randomUUID(); + + ConnectorCreateActionResponse resp = awaitCreateConnector(connectorId, connector); + assertThat(resp.status(), anyOf(equalTo(RestStatus.CREATED), equalTo(RestStatus.OK))); + + boolean isNative = true; + + UpdateConnectorNativeAction.Request updateNativeRequest = new UpdateConnectorNativeAction.Request(connectorId, isNative); + DocWriteResponse updateResponse = awaitUpdateConnectorNative(updateNativeRequest); + assertThat(updateResponse.status(), equalTo(RestStatus.OK)); + + Connector indexedConnector = awaitGetConnector(connectorId); + assertThat(isNative, equalTo(indexedConnector.isNative())); + } + public void testUpdateConnectorStatus() throws Exception { Connector connector = ConnectorTestUtils.getRandomConnector(); String connectorId = randomUUID(); diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorIngestPipelineTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorIngestPipelineTests.java index f4a92e51e8c6a..c3d4bf8b72ff5 100644 --- a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorIngestPipelineTests.java +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorIngestPipelineTests.java @@ -50,7 +50,7 @@ public void testToXContent() throws IOException { String content = XContentHelper.stripWhitespace(""" { "extract_binary_content": true, - "name": "ent-search-generic-ingestion", + "name": "search-default-ingestion", "reduce_whitespace": true, "run_ml_inference": false } diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorTemplateRegistryTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorTemplateRegistryTests.java index a4c7015afafcb..068b99626af9d 100644 --- a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorTemplateRegistryTests.java +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorTemplateRegistryTests.java @@ -132,10 +132,7 @@ public void testThatNonExistingComponentTemplatesAreAddedImmediately() throws Ex ClusterChangedEvent event = createClusterChangedEvent( Collections.emptyMap(), Collections.emptyMap(), - Collections.singletonMap( - ConnectorTemplateRegistry.ENT_SEARCH_GENERIC_PIPELINE_NAME, - ConnectorTemplateRegistry.REGISTRY_VERSION - ), + Collections.singletonMap(ConnectorTemplateRegistry.SEARCH_DEFAULT_PIPELINE_NAME, ConnectorTemplateRegistry.REGISTRY_VERSION), Collections.emptyMap(), nodes ); @@ -169,10 +166,7 @@ public void testThatVersionedOldComponentTemplatesAreUpgraded() throws Exception ConnectorTemplateRegistry.CONNECTOR_TEMPLATE_NAME + 
"-settings", ConnectorTemplateRegistry.REGISTRY_VERSION - 1 ), - Collections.singletonMap( - ConnectorTemplateRegistry.ENT_SEARCH_GENERIC_PIPELINE_NAME, - ConnectorTemplateRegistry.REGISTRY_VERSION - ), + Collections.singletonMap(ConnectorTemplateRegistry.SEARCH_DEFAULT_PIPELINE_NAME, ConnectorTemplateRegistry.REGISTRY_VERSION), Collections.emptyMap(), nodes ); @@ -189,10 +183,7 @@ public void testThatUnversionedOldComponentTemplatesAreUpgraded() throws Excepti ClusterChangedEvent event = createClusterChangedEvent( Collections.emptyMap(), Collections.singletonMap(ConnectorTemplateRegistry.CONNECTOR_TEMPLATE_NAME + "-mappings", null), - Collections.singletonMap( - ConnectorTemplateRegistry.ENT_SEARCH_GENERIC_PIPELINE_NAME, - ConnectorTemplateRegistry.REGISTRY_VERSION - ), + Collections.singletonMap(ConnectorTemplateRegistry.SEARCH_DEFAULT_PIPELINE_NAME, ConnectorTemplateRegistry.REGISTRY_VERSION), Collections.emptyMap(), nodes ); diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorTestUtils.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorTestUtils.java index f052ef79d82fb..c563bc0a14ee3 100644 --- a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorTestUtils.java +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorTestUtils.java @@ -343,6 +343,18 @@ public static Connector getRandomConnectorWithDetachedIndex() { return getRandomConnectorBuilder().setIndexName(null).build(); } + public static Connector getRandomConnectorWithAttachedIndex(String indexName) { + return getRandomConnectorBuilder().setIndexName(indexName).build(); + } + + public static Connector getRandomSelfManagedConnector() { + return getRandomConnectorBuilder().setIsNative(false).build(); + } + + public static Connector getRandomElasticManagedConnector() { + return getRandomConnectorBuilder().setIsNative(true).build(); + } + public static Connector getRandomConnectorWithServiceTypeNotDefined() { return getRandomConnectorBuilder().setServiceType(null).build(); } diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorTests.java index 734c6eaf86965..bcb647d978abb 100644 --- a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorTests.java +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorTests.java @@ -225,7 +225,7 @@ public void testToXContent() throws IOException { "name":"test-name", "pipeline":{ "extract_binary_content":true, - "name":"ent-search-generic-ingestion", + "name":"search-default-ingestion", "reduce_whitespace":true, "run_ml_inference":false }, @@ -286,7 +286,7 @@ public void testToContent_WithNullValues() throws IOException { "name": null, "pipeline":{ "extract_binary_content":true, - "name":"ent-search-generic-ingestion", + "name":"search-default-ingestion", "reduce_whitespace":true, "run_ml_inference":false }, @@ -350,7 +350,7 @@ public void testToXContent_withOptionalFieldsMissing() throws IOException { "name": null, "pipeline":{ "extract_binary_content":true, - "name":"ent-search-generic-ingestion", + "name":"search-default-ingestion", "reduce_whitespace":true, "run_ml_inference":false }, diff --git 
a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/action/PostConnectorActionTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/action/PostConnectorActionTests.java index 0f0e83f2b9c51..e482bf3f6bb74 100644 --- a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/action/PostConnectorActionTests.java +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/action/PostConnectorActionTests.java @@ -20,7 +20,7 @@ public void testValidate_WhenConnectorIdAndIndexNamePresent_ExpectNoValidationEr PostConnectorAction.Request request = new PostConnectorAction.Request( randomAlphaOfLength(10), randomAlphaOfLength(10), - randomBoolean(), + false, randomAlphaOfLength(10), randomAlphaOfLength(10), randomAlphaOfLength(10) @@ -30,6 +30,24 @@ public void testValidate_WhenConnectorIdAndIndexNamePresent_ExpectNoValidationEr assertThat(exception, nullValue()); } + public void testValidate_WrongIndexNamePresentForManagedConnector_ExpectValidationError() { + PostConnectorAction.Request requestWithIllegalIndexName = new PostConnectorAction.Request( + randomAlphaOfLength(10), + "wrong-prefix-" + randomAlphaOfLength(10), + true, + randomAlphaOfLength(10), + randomAlphaOfLength(10), + randomAlphaOfLength(10) + ); + ActionRequestValidationException exception = requestWithIllegalIndexName.validate(); + + assertThat(exception, notNullValue()); + assertThat( + exception.getMessage(), + containsString("Index attached to an Elastic-managed connector must start with the prefix: [content-]") + ); + } + public void testValidate_WhenMalformedIndexName_ExpectValidationError() { PostConnectorAction.Request requestWithMissingConnectorId = new PostConnectorAction.Request( randomAlphaOfLength(10), diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/action/PutConnectorActionTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/action/PutConnectorActionTests.java index 873e102e40931..10ab049413565 100644 --- a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/action/PutConnectorActionTests.java +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/action/PutConnectorActionTests.java @@ -21,7 +21,7 @@ public void testValidate_WhenConnectorIdAndIndexNamePresent_ExpectNoValidationEr randomAlphaOfLength(10), randomAlphaOfLength(10), randomAlphaOfLength(10), - randomBoolean(), + false, randomAlphaOfLength(10), randomAlphaOfLength(10), randomAlphaOfLength(10) @@ -31,6 +31,25 @@ public void testValidate_WhenConnectorIdAndIndexNamePresent_ExpectNoValidationEr assertThat(exception, nullValue()); } + public void testValidate_WrongIndexNamePresentForManagedConnector_ExpectValidationError() { + PutConnectorAction.Request requestWithIllegalIndexName = new PutConnectorAction.Request( + randomAlphaOfLength(10), + randomAlphaOfLength(10), + "wrong-prefix-" + randomAlphaOfLength(10), + true, + randomAlphaOfLength(10), + randomAlphaOfLength(10), + randomAlphaOfLength(10) + ); + ActionRequestValidationException exception = requestWithIllegalIndexName.validate(); + + assertThat(exception, notNullValue()); + assertThat( + exception.getMessage(), + containsString("Index attached to an Elastic-managed connector must start with the prefix: [content-]") + ); + } + public void testValidate_WhenMalformedIndexName_ExpectValidationError() { 
PutConnectorAction.Request requestWithMissingConnectorId = new PutConnectorAction.Request( randomAlphaOfLength(10), diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobTests.java index 81b05ce25e177..ed3338c715bdf 100644 --- a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobTests.java +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobTests.java @@ -77,7 +77,7 @@ public void testFromXContent_WithAllFields_AllSet() throws IOException { "language": "english", "pipeline": { "extract_binary_content": true, - "name": "ent-search-generic-ingestion", + "name": "search-default-ingestion", "reduce_whitespace": true, "run_ml_inference": false }, @@ -160,7 +160,7 @@ public void testFromXContent_WithOnlyNonNullableFieldsSet_DoesNotThrow() throws "language": "english", "pipeline": { "extract_binary_content": true, - "name": "ent-search-generic-ingestion", + "name": "search-default-ingestion", "reduce_whitespace": true, "run_ml_inference": false }, @@ -218,7 +218,7 @@ public void testFromXContent_WithAllNullableFieldsSetToNull_DoesNotThrow() throw "language": "english", "pipeline": { "extract_binary_content": true, - "name": "ent-search-generic-ingestion", + "name": "search-default-ingestion", "reduce_whitespace": true, "run_ml_inference": false }, @@ -275,7 +275,7 @@ public void testSyncJobConnectorFromXContent_WithAllFieldsSet() throws IOExcepti "language": "english", "pipeline": { "extract_binary_content": true, - "name": "ent-search-generic-ingestion", + "name": "search-default-ingestion", "reduce_whitespace": true, "run_ml_inference": false }, diff --git a/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/BaseEqlSpecTestCase.java b/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/BaseEqlSpecTestCase.java index 90244d9b2c019..3557114e2f4c7 100644 --- a/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/BaseEqlSpecTestCase.java +++ b/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/BaseEqlSpecTestCase.java @@ -33,6 +33,9 @@ import java.util.function.Function; import java.util.stream.Collectors; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.is; + public abstract class BaseEqlSpecTestCase extends RemoteClusterAwareEqlRestTestCase { protected static final String PARAM_FORMATTING = "%2$s"; @@ -52,6 +55,9 @@ public abstract class BaseEqlSpecTestCase extends RemoteClusterAwareEqlRestTestC */ private final int size; private final int maxSamplesPerKey; + private final Boolean allowPartialSearchResults; + private final Boolean allowPartialSequenceResults; + private final Boolean expectShardFailures; @Before public void setup() throws Exception { @@ -104,7 +110,16 @@ protected static List asArray(List specs) { } results.add( - new Object[] { spec.query(), name, spec.expectedEventIds(), spec.joinKeys(), spec.size(), spec.maxSamplesPerKey() } + new Object[] { + spec.query(), + name, + spec.expectedEventIds(), + spec.joinKeys(), + spec.size(), + spec.maxSamplesPerKey(), + spec.allowPartialSearchResults(), + spec.allowPartialSequenceResults(), + spec.expectShardFailures() } ); } @@ -118,7 +133,10 @@ protected static List asArray(List specs) { List eventIds, String[] joinKeys, Integer 
size, - Integer maxSamplesPerKey + Integer maxSamplesPerKey, + Boolean allowPartialSearchResults, + Boolean allowPartialSequenceResults, + Boolean expectShardFailures ) { this.index = index; @@ -128,6 +146,9 @@ protected static List asArray(List specs) { this.joinKeys = joinKeys; this.size = size == null ? -1 : size; this.maxSamplesPerKey = maxSamplesPerKey == null ? -1 : maxSamplesPerKey; + this.allowPartialSearchResults = allowPartialSearchResults; + this.allowPartialSequenceResults = allowPartialSequenceResults; + this.expectShardFailures = expectShardFailures; } public void test() throws Exception { @@ -137,6 +158,7 @@ public void test() throws Exception { private void assertResponse(ObjectPath response) throws Exception { List> events = response.evaluate("hits.events"); List> sequences = response.evaluate("hits.sequences"); + Object shardFailures = response.evaluate("shard_failures"); if (events != null) { assertEvents(events); @@ -145,6 +167,7 @@ private void assertResponse(ObjectPath response) throws Exception { } else { fail("No events or sequences found"); } + assertShardFailures(shardFailures); } protected ObjectPath runQuery(String index, String query) throws Exception { @@ -163,6 +186,32 @@ protected ObjectPath runQuery(String index, String query) throws Exception { if (maxSamplesPerKey > 0) { builder.field("max_samples_per_key", maxSamplesPerKey); } + boolean allowPartialResultsInBody = randomBoolean(); + if (allowPartialSearchResults != null) { + if (allowPartialResultsInBody) { + builder.field("allow_partial_search_results", String.valueOf(allowPartialSearchResults)); + if (allowPartialSequenceResults != null) { + builder.field("allow_partial_sequence_results", String.valueOf(allowPartialSequenceResults)); + } + } else { + // these will be overwritten by the path params, that have higher priority than the query (JSON body) params + if (allowPartialSearchResults != null) { + builder.field("allow_partial_search_results", randomBoolean()); + } + if (allowPartialSequenceResults != null) { + builder.field("allow_partial_sequence_results", randomBoolean()); + } + } + } else { + // Tests that don't specify a setting for these parameters should always pass. + // These params should be irrelevant. + if (randomBoolean()) { + builder.field("allow_partial_search_results", randomBoolean()); + } + if (randomBoolean()) { + builder.field("allow_partial_sequence_results", randomBoolean()); + } + } builder.endObject(); Request request = new Request("POST", "/" + index + "/_eql/search"); @@ -170,6 +219,23 @@ protected ObjectPath runQuery(String index, String query) throws Exception { if (ccsMinimizeRoundtrips != null) { request.addParameter("ccs_minimize_roundtrips", ccsMinimizeRoundtrips.toString()); } + if (allowPartialSearchResults != null) { + if (allowPartialResultsInBody == false) { + request.addParameter("allow_partial_search_results", String.valueOf(allowPartialSearchResults)); + if (allowPartialSequenceResults != null) { + request.addParameter("allow_partial_sequence_results", String.valueOf(allowPartialSequenceResults)); + } + } + } else { + // Tests that don't specify a setting for these parameters should always pass. + // These params should be irrelevant. 
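The body-vs-URL randomization in runQuery above exercises a precedence rule of the EQL REST API: a URL parameter overrides the identically named field in the JSON body. A compact sketch of that idea, assuming the same low-level REST client types the test already imports; the helper name and shape are illustrative, not part of the patch.

```java
import java.io.IOException;

import org.elasticsearch.client.Request;
import org.elasticsearch.xcontent.XContentBuilder;

// Illustrative helper, not in the patch: when the authoritative value travels
// as a URL parameter, the JSON body may carry a random decoy, because URL
// parameters take precedence over body fields for the EQL search endpoint.
final class PartialResultsFlag {
    static void apply(XContentBuilder body, Request request,
                      boolean allowPartialSearchResults, boolean sendInBody) throws IOException {
        if (sendInBody) {
            body.field("allow_partial_search_results", allowPartialSearchResults);
        } else {
            body.field("allow_partial_search_results", Math.random() < 0.5); // decoy, overridden by the URL param
            request.addParameter("allow_partial_search_results", String.valueOf(allowPartialSearchResults));
        }
    }
}
```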
+ if (randomBoolean()) { + request.addParameter("allow_partial_search_results", String.valueOf(randomBoolean())); + } + if (randomBoolean()) { + request.addParameter("allow_partial_sequence_results", String.valueOf(randomBoolean())); + } + } int timeout = Math.toIntExact(timeout().millis()); RequestConfig config = RequestConfig.copy(RequestConfig.DEFAULT) .setConnectionRequestTimeout(timeout) @@ -182,6 +248,20 @@ protected ObjectPath runQuery(String index, String query) throws Exception { return ObjectPath.createFromResponse(client().performRequest(request)); } + private void assertShardFailures(Object shardFailures) { + if (expectShardFailures != null) { + if (expectShardFailures) { + assertNotNull(shardFailures); + List list = (List) shardFailures; + assertThat(list.size(), is(greaterThan(0))); + } else { + assertNull(shardFailures); + } + } else { + assertNull(shardFailures); + } + } + private void assertEvents(List> events) { assertNotNull(events); logger.debug("Events {}", new Object() { diff --git a/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/DataLoader.java b/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/DataLoader.java index 1d51af574c810..4618bd8f4ff3d 100644 --- a/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/DataLoader.java +++ b/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/DataLoader.java @@ -52,6 +52,7 @@ */ public class DataLoader { public static final String TEST_INDEX = "endgame-140"; + public static final String TEST_SHARD_FAILURES_INDEX = "endgame-shard-failures"; public static final String TEST_EXTRA_INDEX = "extra"; public static final String TEST_NANOS_INDEX = "endgame-140-nanos"; public static final String TEST_SAMPLE = "sample1,sample2,sample3"; @@ -103,6 +104,11 @@ public static void loadDatasetIntoEs(RestClient client, CheckedBiFunction eventIds, String[] joinKeys, Integer size, - Integer maxSamplesPerKey + Integer maxSamplesPerKey, + Boolean allowPartialSearchResults, + Boolean allowPartialSequenceResults, + Boolean expectShardFailures ) { - this(TEST_NANOS_INDEX, query, name, eventIds, joinKeys, size, maxSamplesPerKey); + this( + TEST_NANOS_INDEX, + query, + name, + eventIds, + joinKeys, + size, + maxSamplesPerKey, + allowPartialSearchResults, + allowPartialSequenceResults, + expectShardFailures + ); } // constructor for multi-cluster tests @@ -40,9 +54,23 @@ public EqlDateNanosSpecTestCase( List eventIds, String[] joinKeys, Integer size, - Integer maxSamplesPerKey + Integer maxSamplesPerKey, + Boolean allowPartialSearchResults, + Boolean allowPartialSequenceResults, + Boolean expectShardFailures ) { - super(index, query, name, eventIds, joinKeys, size, maxSamplesPerKey); + super( + index, + query, + name, + eventIds, + joinKeys, + size, + maxSamplesPerKey, + allowPartialSearchResults, + allowPartialSequenceResults, + expectShardFailures + ); } @Override diff --git a/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/EqlExtraSpecTestCase.java b/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/EqlExtraSpecTestCase.java index 292fe6c895cee..cc858ded25f37 100644 --- a/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/EqlExtraSpecTestCase.java +++ b/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/EqlExtraSpecTestCase.java @@ -27,9 +27,23 @@ public EqlExtraSpecTestCase( List eventIds, String[] joinKeys, Integer size, - Integer maxSamplesPerKey + Integer maxSamplesPerKey, + Boolean 
allowPartialSearchResults, + Boolean allowPartialSequenceResults, + Boolean expectShardFailures ) { - this(TEST_EXTRA_INDEX, query, name, eventIds, joinKeys, size, maxSamplesPerKey); + this( + TEST_EXTRA_INDEX, + query, + name, + eventIds, + joinKeys, + size, + maxSamplesPerKey, + allowPartialSearchResults, + allowPartialSequenceResults, + expectShardFailures + ); } // constructor for multi-cluster tests @@ -40,9 +54,23 @@ public EqlExtraSpecTestCase( List eventIds, String[] joinKeys, Integer size, - Integer maxSamplesPerKey + Integer maxSamplesPerKey, + Boolean allowPartialSearchResults, + Boolean allowPartialSequenceResults, + Boolean expectShardFailures ) { - super(index, query, name, eventIds, joinKeys, size, maxSamplesPerKey); + super( + index, + query, + name, + eventIds, + joinKeys, + size, + maxSamplesPerKey, + allowPartialSearchResults, + allowPartialSequenceResults, + expectShardFailures + ); } @Override diff --git a/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/EqlMissingEventsSpecTestCase.java b/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/EqlMissingEventsSpecTestCase.java index cdda9e9e068f5..f62c2b29101db 100644 --- a/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/EqlMissingEventsSpecTestCase.java +++ b/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/EqlMissingEventsSpecTestCase.java @@ -27,9 +27,23 @@ public EqlMissingEventsSpecTestCase( List eventIds, String[] joinKeys, Integer size, - Integer maxSamplesPerKey + Integer maxSamplesPerKey, + Boolean allowPartialSearchResults, + Boolean allowPartialSequenceResults, + Boolean expectShardFailures ) { - this(TEST_MISSING_EVENTS_INDEX, query, name, eventIds, joinKeys, size, maxSamplesPerKey); + this( + TEST_MISSING_EVENTS_INDEX, + query, + name, + eventIds, + joinKeys, + size, + maxSamplesPerKey, + allowPartialSearchResults, + allowPartialSequenceResults, + expectShardFailures + ); } // constructor for multi-cluster tests @@ -40,9 +54,23 @@ public EqlMissingEventsSpecTestCase( List eventIds, String[] joinKeys, Integer size, - Integer maxSamplesPerKey + Integer maxSamplesPerKey, + Boolean allowPartialSearchResults, + Boolean allowPartialSequenceResults, + Boolean expectShardFailures ) { - super(index, query, name, eventIds, joinKeys, size, maxSamplesPerKey); + super( + index, + query, + name, + eventIds, + joinKeys, + size, + maxSamplesPerKey, + allowPartialSearchResults, + allowPartialSequenceResults, + expectShardFailures + ); } @Override diff --git a/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/EqlSampleMultipleEntriesTestCase.java b/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/EqlSampleMultipleEntriesTestCase.java index 6471e264a92fa..a38ccacb42f5f 100644 --- a/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/EqlSampleMultipleEntriesTestCase.java +++ b/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/EqlSampleMultipleEntriesTestCase.java @@ -21,9 +21,23 @@ public EqlSampleMultipleEntriesTestCase( List eventIds, String[] joinKeys, Integer size, - Integer maxSamplesPerKey + Integer maxSamplesPerKey, + Boolean allowPartialSearchResults, + Boolean allowPartialSequenceResults, + Boolean expectShardFailures ) { - this(TEST_SAMPLE_MULTI, query, name, eventIds, joinKeys, size, maxSamplesPerKey); + this( + TEST_SAMPLE_MULTI, + query, + name, + eventIds, + joinKeys, + size, + maxSamplesPerKey, + allowPartialSearchResults, + allowPartialSequenceResults, + 
expectShardFailures + ); } public EqlSampleMultipleEntriesTestCase( @@ -33,9 +47,23 @@ public EqlSampleMultipleEntriesTestCase( List eventIds, String[] joinKeys, Integer size, - Integer maxSamplesPerKey + Integer maxSamplesPerKey, + Boolean allowPartialSearchResults, + Boolean allowPartialSequenceResults, + Boolean expectShardFailures ) { - super(index, query, name, eventIds, joinKeys, size, maxSamplesPerKey); + super( + index, + query, + name, + eventIds, + joinKeys, + size, + maxSamplesPerKey, + allowPartialSearchResults, + allowPartialSequenceResults, + expectShardFailures + ); } @ParametersFactory(shuffle = false, argumentFormatting = PARAM_FORMATTING) diff --git a/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/EqlSampleTestCase.java b/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/EqlSampleTestCase.java index dfae73b3602a7..4748bd0e3307b 100644 --- a/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/EqlSampleTestCase.java +++ b/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/EqlSampleTestCase.java @@ -15,8 +15,29 @@ public abstract class EqlSampleTestCase extends BaseEqlSpecTestCase { - public EqlSampleTestCase(String query, String name, List eventIds, String[] joinKeys, Integer size, Integer maxSamplesPerKey) { - this(TEST_SAMPLE, query, name, eventIds, joinKeys, size, maxSamplesPerKey); + public EqlSampleTestCase( + String query, + String name, + List eventIds, + String[] joinKeys, + Integer size, + Integer maxSamplesPerKey, + Boolean allowPartialSearchResults, + Boolean allowPartialSequenceResults, + Boolean expectShardFailures + ) { + this( + TEST_SAMPLE, + query, + name, + eventIds, + joinKeys, + size, + maxSamplesPerKey, + allowPartialSearchResults, + allowPartialSequenceResults, + expectShardFailures + ); } public EqlSampleTestCase( @@ -26,9 +47,23 @@ public EqlSampleTestCase( List eventIds, String[] joinKeys, Integer size, - Integer maxSamplesPerKey + Integer maxSamplesPerKey, + Boolean allowPartialSearchResults, + Boolean allowPartialSequenceResults, + Boolean expectShardFailures ) { - super(index, query, name, eventIds, joinKeys, size, maxSamplesPerKey); + super( + index, + query, + name, + eventIds, + joinKeys, + size, + maxSamplesPerKey, + allowPartialSearchResults, + allowPartialSequenceResults, + expectShardFailures + ); } @ParametersFactory(shuffle = false, argumentFormatting = PARAM_FORMATTING) diff --git a/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/EqlSpec.java b/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/EqlSpec.java index db7ee05ff2239..4dd617bac0abd 100644 --- a/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/EqlSpec.java +++ b/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/EqlSpec.java @@ -30,6 +30,9 @@ public class EqlSpec { private Integer size; private Integer maxSamplesPerKey; + private Boolean allowPartialSearchResults; + private Boolean allowPartialSequenceResults; + private Boolean expectShardFailures; public String name() { return name; @@ -103,6 +106,30 @@ public void maxSamplesPerKey(Integer maxSamplesPerKey) { this.maxSamplesPerKey = maxSamplesPerKey; } + public Boolean allowPartialSearchResults() { + return allowPartialSearchResults; + } + + public void allowPartialSearchResults(Boolean allowPartialSearchResults) { + this.allowPartialSearchResults = allowPartialSearchResults; + } + + public Boolean allowPartialSequenceResults() { + return allowPartialSequenceResults; + } + + 
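The three EqlSpec additions here are deliberately nullable Booleans rather than primitives: a key that is absent from the TOML spec stays null, so existing specs keep their old semantics, and the null-aware equals/hashCode below fold the flags into spec identity. A small self-contained sketch of that tri-state loading, assuming the same tomlj API the spec loader (further below) already uses:

```java
import org.tomlj.Toml;
import org.tomlj.TomlParseResult;

// Standalone sketch (assumes the tomlj dependency used by EqlSpecLoader):
// absent keys come back as null, giving each flag three states —
// true, false, or "not specified, keep the legacy behavior".
public final class SpecFlagsSketch {
    public static void main(String[] args) {
        TomlParseResult spec = Toml.parse("""
            allow_partial_search_results = true
            expect_shard_failures = true
            """);
        Boolean allowPartialSearch = spec.getBoolean("allow_partial_search_results");     // true
        Boolean allowPartialSequence = spec.getBoolean("allow_partial_sequence_results"); // null: key absent
        Boolean expectShardFailures = spec.getBoolean("expect_shard_failures");           // true
        System.out.println(allowPartialSearch + " / " + allowPartialSequence + " / " + expectShardFailures);
    }
}
```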
public void allowPartialSequenceResults(Boolean allowPartialSequenceResults) { + this.allowPartialSequenceResults = allowPartialSequenceResults; + } + + public Boolean expectShardFailures() { + return expectShardFailures; + } + + public void expectShardFailures(Boolean expectShardFailures) { + this.expectShardFailures = expectShardFailures; + } + @Override public String toString() { String str = ""; @@ -132,7 +159,15 @@ public String toString() { if (maxSamplesPerKey != null) { str = appendWithComma(str, "max_samples_per_key", "" + maxSamplesPerKey); } - + if (allowPartialSearchResults != null) { + str = appendWithComma(str, "allow_partial_search_results", String.valueOf(allowPartialSearchResults)); + } + if (allowPartialSequenceResults != null) { + str = appendWithComma(str, "allow_partial_sequence_results", String.valueOf(allowPartialSequenceResults)); + } + if (expectShardFailures != null) { + str = appendWithComma(str, "expect_shard_failures", String.valueOf(expectShardFailures)); + } return str; } @@ -150,12 +185,22 @@ public boolean equals(Object other) { return Objects.equals(this.query(), that.query()) && Objects.equals(size, that.size) - && Objects.equals(maxSamplesPerKey, that.maxSamplesPerKey); + && Objects.equals(maxSamplesPerKey, that.maxSamplesPerKey) + && Objects.equals(allowPartialSearchResults, that.allowPartialSearchResults) + && Objects.equals(allowPartialSequenceResults, that.allowPartialSequenceResults) + && Objects.equals(expectShardFailures, that.expectShardFailures); } @Override public int hashCode() { - return Objects.hash(this.query, size, maxSamplesPerKey); + return Objects.hash( + this.query, + size, + maxSamplesPerKey, + allowPartialSearchResults, + allowPartialSequenceResults, + expectShardFailures + ); } private static String appendWithComma(String str, String name, String append) { diff --git a/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/EqlSpecFailingShardsTestCase.java b/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/EqlSpecFailingShardsTestCase.java new file mode 100644 index 0000000000000..c490a2f703dcc --- /dev/null +++ b/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/EqlSpecFailingShardsTestCase.java @@ -0,0 +1,83 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.test.eql; + +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import java.util.List; + +import static org.elasticsearch.test.eql.DataLoader.TEST_INDEX; +import static org.elasticsearch.test.eql.DataLoader.TEST_SHARD_FAILURES_INDEX; + +public abstract class EqlSpecFailingShardsTestCase extends BaseEqlSpecTestCase { + + @ParametersFactory(shuffle = false, argumentFormatting = PARAM_FORMATTING) + public static List readTestSpecs() throws Exception { + + // Load EQL validation specs + return asArray(EqlSpecLoader.load("/test_failing_shards.toml")); + } + + @Override + protected String tiebreaker() { + return "serial_event_id"; + } + + // constructor for "local" rest tests + public EqlSpecFailingShardsTestCase( + String query, + String name, + List eventIds, + String[] joinKeys, + Integer size, + Integer maxSamplesPerKey, + Boolean allowPartialSearchResults, + Boolean allowPartialSequenceResults, + Boolean expectShardFailures + ) { + this( + TEST_INDEX + "," + TEST_SHARD_FAILURES_INDEX, + query, + name, + eventIds, + joinKeys, + size, + maxSamplesPerKey, + allowPartialSearchResults, + allowPartialSequenceResults, + expectShardFailures + ); + } + + // constructor for multi-cluster tests + public EqlSpecFailingShardsTestCase( + String index, + String query, + String name, + List eventIds, + String[] joinKeys, + Integer size, + Integer maxSamplesPerKey, + Boolean allowPartialSearch, + Boolean allowPartialSequenceResults, + Boolean expectShardFailures + ) { + super( + index, + query, + name, + eventIds, + joinKeys, + size, + maxSamplesPerKey, + allowPartialSearch, + allowPartialSequenceResults, + expectShardFailures + ); + } +} diff --git a/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/EqlSpecLoader.java b/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/EqlSpecLoader.java index a1f555563e29c..f86107cf3bac5 100644 --- a/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/EqlSpecLoader.java +++ b/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/EqlSpecLoader.java @@ -76,6 +76,10 @@ private static Integer getInteger(TomlTable table, String key) { return null; } + private static Boolean getBoolean(TomlTable table, String key) { + return table.getBoolean(key); + } + private static List readFromStream(InputStream is, Set uniqueTestNames) throws Exception { List testSpecs = new ArrayList<>(); @@ -90,6 +94,9 @@ private static List readFromStream(InputStream is, Set uniqueTe spec.note(getTrimmedString(table, "note")); spec.description(getTrimmedString(table, "description")); spec.size(getInteger(table, "size")); + spec.allowPartialSearchResults(getBoolean(table, "allow_partial_search_results")); + spec.allowPartialSequenceResults(getBoolean(table, "allow_partial_sequence_results")); + spec.expectShardFailures(getBoolean(table, "expect_shard_failures")); List arr = table.getList("tags"); if (arr != null) { diff --git a/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/EqlSpecTestCase.java b/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/EqlSpecTestCase.java index 7113924f79029..62a3ea72fe51f 100644 --- a/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/EqlSpecTestCase.java +++ b/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/EqlSpecTestCase.java @@ -28,8 +28,29 @@ protected String tiebreaker() { } // constructor for "local" rest tests - public EqlSpecTestCase(String query, String name, 
List eventIds, String[] joinKeys, Integer size, Integer maxSamplesPerKey) { - this(TEST_INDEX, query, name, eventIds, joinKeys, size, maxSamplesPerKey); + public EqlSpecTestCase( + String query, + String name, + List eventIds, + String[] joinKeys, + Integer size, + Integer maxSamplesPerKey, + Boolean allowPartialSearch, + Boolean allowPartialSequenceResults, + Boolean expectShardFailures + ) { + this( + TEST_INDEX, + query, + name, + eventIds, + joinKeys, + size, + maxSamplesPerKey, + allowPartialSearch, + allowPartialSequenceResults, + expectShardFailures + ); } // constructor for multi-cluster tests @@ -40,8 +61,22 @@ public EqlSpecTestCase( List eventIds, String[] joinKeys, Integer size, - Integer maxSamplesPerKey + Integer maxSamplesPerKey, + Boolean allowPartialSearch, + Boolean allowPartialSequenceResults, + Boolean expectShardFailures ) { - super(index, query, name, eventIds, joinKeys, size, maxSamplesPerKey); + super( + index, + query, + name, + eventIds, + joinKeys, + size, + maxSamplesPerKey, + allowPartialSearch, + allowPartialSequenceResults, + expectShardFailures + ); } } diff --git a/x-pack/plugin/eql/qa/common/src/main/resources/data/endgame-shard-failures.data b/x-pack/plugin/eql/qa/common/src/main/resources/data/endgame-shard-failures.data new file mode 100644 index 0000000000000..18a1d05656d09 --- /dev/null +++ b/x-pack/plugin/eql/qa/common/src/main/resources/data/endgame-shard-failures.data @@ -0,0 +1,14 @@ +[ + { + "event_subtype_full": "already_running", + "event_type": "process", + "event_type_full": "process_event", + "opcode": 3, + "pid": 0, + "process_name": "System Idle Process", + "serial_event_id": 10000, + "subtype": "create", + "timestamp": 117444736000000000, + "unique_pid": 1 + } +] diff --git a/x-pack/plugin/eql/qa/common/src/main/resources/data/endgame-shard-failures.mapping b/x-pack/plugin/eql/qa/common/src/main/resources/data/endgame-shard-failures.mapping new file mode 100644 index 0000000000000..3b5039f4098af --- /dev/null +++ b/x-pack/plugin/eql/qa/common/src/main/resources/data/endgame-shard-failures.mapping @@ -0,0 +1,105 @@ +# Text patterns like "[runtime_random_keyword_type]" will get replaced at runtime with a random string type. +# See DataLoader class for pattern replacements. 
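The endgame-shard-failures fixture above holds a single event with serial_event_id 10000, far outside the 1-2 range the failing-shards specs match, so it never alters an expected hit list; its only job is to contribute shards that can fail. The mapping that follows arms it with a runtime field named "broken" whose painless script dereferences a field absent from the mapping, so any query touching "broken" throws at search time on that index's shards only. A simplified sketch of creating such a booby-trapped index through the REST API (the script body is reduced from the one in the mapping file, which is additionally randomized by DataLoader):

```java
import org.elasticsearch.client.Request;

// Simplified sketch, not the patch itself: an index whose "broken" runtime
// field throws whenever a query evaluates it, because doc['non_existing']
// refers to a field that is not mapped. Queries that never reference
// "broken" succeed on this index; queries that do fail only its shards.
final class FailingIndexFixture {
    static Request createFailingIndex() {
        Request request = new Request("PUT", "/endgame-shard-failures");
        request.setJsonEntity("""
            {
              "mappings": {
                "runtime": {
                  "broken": {
                    "type": "long",
                    "script": { "source": "emit(doc['non_existing'].value)" }
                  }
                }
              }
            }
            """);
        return request;
    }
}
```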
+{ + "runtime":{ + "broken":{ + "type": "long", + "script": { + "lang": "painless", + "source": "emit(doc['non_existing'].value.dayOfWeekEnum.getDisplayName(TextStyle.FULL, Locale.ENGLISH))" + } + } + }, + "properties" : { + "command_line" : { + "type" : "[runtime_random_keyword_type]" + }, + "event_type" : { + "type" : "[runtime_random_keyword_type]" + }, + "event" : { + "properties" : { + "category" : { + "type" : "alias", + "path" : "event_type" + }, + "sequence" : { + "type" : "alias", + "path" : "serial_event_id" + } + } + }, + "md5" : { + "type" : "[runtime_random_keyword_type]" + }, + "parent_process_name": { + "type" : "[runtime_random_keyword_type]" + }, + "parent_process_path": { + "type" : "[runtime_random_keyword_type]" + }, + "pid" : { + "type" : "long" + }, + "ppid" : { + "type" : "long" + }, + "process_name": { + "type" : "[runtime_random_keyword_type]" + }, + "process_path": { + "type" : "[runtime_random_keyword_type]" + }, + "subtype" : { + "type" : "[runtime_random_keyword_type]" + }, + "timestamp" : { + "type" : "date" + }, + "@timestamp" : { + "type" : "date" + }, + "user" : { + "type" : "[runtime_random_keyword_type]" + }, + "user_name" : { + "type" : "[runtime_random_keyword_type]" + }, + "user_domain": { + "type" : "[runtime_random_keyword_type]" + }, + "hostname" : { + "type" : "text", + "fields" : { + "[runtime_random_keyword_type]" : { + "type" : "[runtime_random_keyword_type]", + "ignore_above" : 256 + } + } + }, + "opcode" : { + "type" : "long" + }, + "file_name" : { + "type" : "text", + "fields" : { + "[runtime_random_keyword_type]" : { + "type" : "[runtime_random_keyword_type]", + "ignore_above" : 256 + } + } + }, + "file_path" : { + "type" : "[runtime_random_keyword_type]" + }, + "serial_event_id" : { + "type" : "long" + }, + "source_address" : { + "type" : "ip" + }, + "exit_code" : { + "type" : "long" + } + } +} diff --git a/x-pack/plugin/eql/qa/common/src/main/resources/test_failing_shards.toml b/x-pack/plugin/eql/qa/common/src/main/resources/test_failing_shards.toml new file mode 100644 index 0000000000000..a551c66fd48bd --- /dev/null +++ b/x-pack/plugin/eql/qa/common/src/main/resources/test_failing_shards.toml @@ -0,0 +1,173 @@ +# this query doesn't touch the "broken" field, so it should not fail +[[queries]] +name = "eventQueryNoShardFailures" +query = 'process where serial_event_id == 1' +allow_partial_search_results = true +expected_event_ids = [1] +expect_shard_failures = false + + +[[queries]] +name = "eventQueryShardFailures" +query = 'process where serial_event_id == 1 or broken == 1' +allow_partial_search_results = true +expected_event_ids = [1] +expect_shard_failures = true + + +[[queries]] +name = "eventQueryShardFailuresOptionalField" +query = 'process where serial_event_id == 1 and ?optional_field_default_null == null or broken == 1' +allow_partial_search_results = true +expected_event_ids = [1] +expect_shard_failures = true + + +[[queries]] +name = "eventQueryShardFailuresOptionalFieldMatching" +query = 'process where serial_event_id == 2 and ?subtype == "create" or broken == 1' +allow_partial_search_results = true +expected_event_ids = [2] +expect_shard_failures = true + + +# this query doesn't touch the "broken" field, so it should not fail +[[queries]] +name = "sequenceQueryNoShardFailures" +query = ''' +sequence + [process where serial_event_id == 1] + [process where serial_event_id == 2] +''' +expected_event_ids = [1, 2] +expect_shard_failures = false + + +# this query doesn't touch the "broken" field, so it should not fail +[[queries]] 
+name = "sequenceQueryNoShardFailuresAllowFalse" +query = ''' +sequence + [process where serial_event_id == 1] + [process where serial_event_id == 2] +''' +allow_partial_search_results = false +expected_event_ids = [1, 2] +expect_shard_failures = false + + +# this query doesn't touch the "broken" field, so it should not fail +[[queries]] +name = "sequenceQueryNoShardFailuresAllowTrue" +query = ''' +sequence + [process where serial_event_id == 1] + [process where serial_event_id == 2] +''' +allow_partial_search_results = true +expected_event_ids = [1, 2] +expect_shard_failures = false + + +[[queries]] +name = "sequenceQueryMissingShards" +query = ''' +sequence + [process where serial_event_id == 1 or broken == 1] + [process where serial_event_id == 2] +''' +allow_partial_search_results = true +expected_event_ids = [] +expect_shard_failures = true + + +[[queries]] +name = "sequenceQueryMissingShardsPartialResults" +query = ''' +sequence + [process where serial_event_id == 1 or broken == 1] + [process where serial_event_id == 2] +''' +allow_partial_search_results = true +allow_partial_sequence_results = true +expected_event_ids = [1, 2] +expect_shard_failures = true + + +[[queries]] +name = "sequenceQueryMissingShardsPartialResultsOptional" +query = ''' +sequence + [process where ?serial_event_id == 1 or broken == 1] + [process where serial_event_id == 2] +''' +allow_partial_search_results = true +allow_partial_sequence_results = true +expected_event_ids = [1, 2] +expect_shard_failures = true + + +[[queries]] +name = "sequenceQueryMissingShardsPartialResultsOptional2" +query = ''' +sequence with maxspan=100000d + [process where serial_event_id == 1 and ?subtype == "create" or broken == 1] + [process where serial_event_id == 2] +''' +allow_partial_search_results = true +allow_partial_sequence_results = true +expected_event_ids = [1, 2] +expect_shard_failures = true + + +[[queries]] +name = "sequenceQueryMissingShardsPartialResultsOptionalMissing" +query = ''' +sequence with maxspan=100000d + [process where serial_event_id == 1 and ?subtype == "create"] + ![process where broken == 1] + [process where serial_event_id == 2] +''' +allow_partial_search_results = true +allow_partial_sequence_results = true +expected_event_ids = [1, -1, 2] +expect_shard_failures = true + + +[[queries]] +name = "sequenceQueryMissingShardsPartialResultsOptionalMissing2" +query = ''' +sequence with maxspan=100000d + [process where serial_event_id == 1 and ?subtype == "create" or broken == 1] + ![process where broken == 1] + [process where serial_event_id == 2] +''' +allow_partial_search_results = true +allow_partial_sequence_results = true +expected_event_ids = [1, -1, 2] +expect_shard_failures = true + + +[[queries]] +name = "sampleQueryMissingShardsPartialResults" +query = ''' +sample by event_subtype_full + [process where serial_event_id == 1 or broken == 1] + [process where serial_event_id == 2] +''' +allow_partial_search_results = true +expected_event_ids = [1, 2] +expect_shard_failures = true + + +[[queries]] +name = "sampleQueryMissingShardsPartialResultsOptional" +query = ''' +sample by event_subtype_full + [process where serial_event_id == 1 and ?subtype == "create" or broken == 1] + [process where serial_event_id == 2] +''' +allow_partial_search_results = true +expected_event_ids = [1, 2] +expect_shard_failures = true + diff --git a/x-pack/plugin/eql/qa/mixed-node/src/javaRestTest/java/org/elasticsearch/xpack/eql/qa/mixed_node/EqlSearchIT.java 
b/x-pack/plugin/eql/qa/mixed-node/src/javaRestTest/java/org/elasticsearch/xpack/eql/qa/mixed_node/EqlSearchIT.java index 2a29572374fa8..60c7fb1c7ad25 100644 --- a/x-pack/plugin/eql/qa/mixed-node/src/javaRestTest/java/org/elasticsearch/xpack/eql/qa/mixed_node/EqlSearchIT.java +++ b/x-pack/plugin/eql/qa/mixed-node/src/javaRestTest/java/org/elasticsearch/xpack/eql/qa/mixed_node/EqlSearchIT.java @@ -407,7 +407,16 @@ private void assertMultiValueFunctionQuery( for (int id : ids) { eventIds.add(String.valueOf(id)); } - request.setJsonEntity("{\"query\":\"" + query + "\"}"); + + StringBuilder payload = new StringBuilder("{\"query\":\"" + query + "\""); + if (randomBoolean()) { + payload.append(", \"allow_partial_search_results\": true"); + } + if (randomBoolean()) { + payload.append(", \"allow_partial_sequence_results\": true"); + } + payload.append("}"); + request.setJsonEntity(payload.toString()); assertResponse(query, eventIds, runEql(client, request)); testedFunctions.add(functionName); } diff --git a/x-pack/plugin/eql/qa/multi-cluster-with-security/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlDateNanosIT.java b/x-pack/plugin/eql/qa/multi-cluster-with-security/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlDateNanosIT.java index c20968871472f..5d6824232d80f 100644 --- a/x-pack/plugin/eql/qa/multi-cluster-with-security/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlDateNanosIT.java +++ b/x-pack/plugin/eql/qa/multi-cluster-with-security/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlDateNanosIT.java @@ -37,7 +37,28 @@ protected String getRemoteCluster() { return REMOTE_CLUSTER.getHttpAddresses(); } - public EqlDateNanosIT(String query, String name, List eventIds, String[] joinKeys, Integer size, Integer maxSamplesPerKey) { - super(remoteClusterIndex(TEST_NANOS_INDEX), query, name, eventIds, joinKeys, size, maxSamplesPerKey); + public EqlDateNanosIT( + String query, + String name, + List eventIds, + String[] joinKeys, + Integer size, + Integer maxSamplesPerKey, + Boolean allowPartialSearchResults, + Boolean allowPartialSequenceResults, + Boolean expectShardFailures + ) { + super( + remoteClusterIndex(TEST_NANOS_INDEX), + query, + name, + eventIds, + joinKeys, + size, + maxSamplesPerKey, + allowPartialSearchResults, + allowPartialSequenceResults, + expectShardFailures + ); } } diff --git a/x-pack/plugin/eql/qa/multi-cluster-with-security/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlExtraIT.java b/x-pack/plugin/eql/qa/multi-cluster-with-security/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlExtraIT.java index 774c19d02adf0..79b095434814b 100644 --- a/x-pack/plugin/eql/qa/multi-cluster-with-security/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlExtraIT.java +++ b/x-pack/plugin/eql/qa/multi-cluster-with-security/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlExtraIT.java @@ -37,7 +37,28 @@ protected String getRemoteCluster() { return REMOTE_CLUSTER.getHttpAddresses(); } - public EqlExtraIT(String query, String name, List eventIds, String[] joinKeys, Integer size, Integer maxSamplesPerKey) { - super(remoteClusterIndex(TEST_EXTRA_INDEX), query, name, eventIds, joinKeys, size, maxSamplesPerKey); + public EqlExtraIT( + String query, + String name, + List eventIds, + String[] joinKeys, + Integer size, + Integer maxSamplesPerKey, + Boolean allowPartialSearchResults, + Boolean allowPartialSequenceResults, + Boolean expectShardFailures + ) { + super( + remoteClusterIndex(TEST_EXTRA_INDEX), + query, + name, + eventIds, + joinKeys, + size, + 
maxSamplesPerKey, + allowPartialSearchResults, + allowPartialSequenceResults, + expectShardFailures + ); } } diff --git a/x-pack/plugin/eql/qa/multi-cluster-with-security/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlSampleIT.java b/x-pack/plugin/eql/qa/multi-cluster-with-security/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlSampleIT.java index 1502c250bd058..7673eec32ec55 100644 --- a/x-pack/plugin/eql/qa/multi-cluster-with-security/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlSampleIT.java +++ b/x-pack/plugin/eql/qa/multi-cluster-with-security/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlSampleIT.java @@ -37,7 +37,28 @@ protected String getRemoteCluster() { return REMOTE_CLUSTER.getHttpAddresses(); } - public EqlSampleIT(String query, String name, List<long[]> eventIds, String[] joinKeys, Integer size, Integer maxSamplesPerKey) { - super(remoteClusterPattern(TEST_SAMPLE), query, name, eventIds, joinKeys, size, maxSamplesPerKey); + public EqlSampleIT( + String query, + String name, + List<long[]> eventIds, + String[] joinKeys, + Integer size, + Integer maxSamplesPerKey, + Boolean allowPartialSearchResults, + Boolean allowPartialSequenceResults, + Boolean expectShardFailures + ) { + super( + remoteClusterPattern(TEST_SAMPLE), + query, + name, + eventIds, + joinKeys, + size, + maxSamplesPerKey, + allowPartialSearchResults, + allowPartialSequenceResults, + expectShardFailures + ); } } diff --git a/x-pack/plugin/eql/qa/multi-cluster-with-security/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlSampleMultipleEntriesIT.java b/x-pack/plugin/eql/qa/multi-cluster-with-security/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlSampleMultipleEntriesIT.java index 795fe4e103a31..ac6f7fe508c99 100644 --- a/x-pack/plugin/eql/qa/multi-cluster-with-security/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlSampleMultipleEntriesIT.java +++ b/x-pack/plugin/eql/qa/multi-cluster-with-security/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlSampleMultipleEntriesIT.java @@ -43,8 +43,22 @@ public EqlSampleMultipleEntriesIT( List<long[]> eventIds, String[] joinKeys, Integer size, - Integer maxSamplesPerKey + Integer maxSamplesPerKey, + Boolean allowPartialSearchResults, + Boolean allowPartialSequenceResults, + Boolean expectShardFailures ) { - super(remoteClusterPattern(TEST_SAMPLE_MULTI), query, name, eventIds, joinKeys, size, maxSamplesPerKey); + super( + remoteClusterPattern(TEST_SAMPLE_MULTI), + query, + name, + eventIds, + joinKeys, + size, + maxSamplesPerKey, + allowPartialSearchResults, + allowPartialSequenceResults, + expectShardFailures + ); } } diff --git a/x-pack/plugin/eql/qa/multi-cluster-with-security/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlSpecIT.java b/x-pack/plugin/eql/qa/multi-cluster-with-security/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlSpecIT.java index 2cddecb644a1a..db0c03e8fdb6f 100644 --- a/x-pack/plugin/eql/qa/multi-cluster-with-security/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlSpecIT.java +++ b/x-pack/plugin/eql/qa/multi-cluster-with-security/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlSpecIT.java @@ -37,7 +37,28 @@ protected String getRemoteCluster() { return REMOTE_CLUSTER.getHttpAddresses(); } - public EqlSpecIT(String query, String name, List<long[]> eventIds, String[] joinKeys, Integer size, Integer maxSamplesPerKey) { - super(remoteClusterIndex(TEST_INDEX), query, name, eventIds, joinKeys, size, maxSamplesPerKey); + public EqlSpecIT( + String query, + String name, + List<long[]> eventIds, + String[] joinKeys, + Integer size, +
Integer maxSamplesPerKey, + Boolean allowPartialSearchResults, + Boolean allowPartialSequenceResults, + Boolean expectShardFailures + ) { + super( + remoteClusterIndex(TEST_INDEX), + query, + name, + eventIds, + joinKeys, + size, + maxSamplesPerKey, + allowPartialSearchResults, + allowPartialSequenceResults, + expectShardFailures + ); } } diff --git a/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlDateNanosIT.java b/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlDateNanosIT.java index 1df10fde7fde5..5e1fa224de58d 100644 --- a/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlDateNanosIT.java +++ b/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlDateNanosIT.java @@ -27,7 +27,27 @@ protected String getTestRestCluster() { return cluster.getHttpAddresses(); } - public EqlDateNanosIT(String query, String name, List<long[]> eventIds, String[] joinKeys, Integer size, Integer maxSamplesPerKey) { - super(query, name, eventIds, joinKeys, size, maxSamplesPerKey); + public EqlDateNanosIT( + String query, + String name, + List<long[]> eventIds, + String[] joinKeys, + Integer size, + Integer maxSamplesPerKey, + Boolean allowPartialSearchResults, + Boolean allowPartialSequenceResults, + Boolean expectShardFailures + ) { + super( + query, + name, + eventIds, + joinKeys, + size, + maxSamplesPerKey, + allowPartialSearchResults, + allowPartialSequenceResults, + expectShardFailures + ); } } diff --git a/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlExtraIT.java b/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlExtraIT.java index 8af8fcac087b5..cb92eddeb0410 100644 --- a/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlExtraIT.java +++ b/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlExtraIT.java @@ -27,7 +27,27 @@ protected String getTestRestCluster() { return cluster.getHttpAddresses(); } - public EqlExtraIT(String query, String name, List<long[]> eventIds, String[] joinKeys, Integer size, Integer maxSamplesPerKey) { - super(query, name, eventIds, joinKeys, size, maxSamplesPerKey); + public EqlExtraIT( + String query, + String name, + List<long[]> eventIds, + String[] joinKeys, + Integer size, + Integer maxSamplesPerKey, + Boolean allowPartialSearchResults, + Boolean allowPartialSequenceResults, + Boolean expectShardFailures + ) { + super( + query, + name, + eventIds, + joinKeys, + size, + maxSamplesPerKey, + allowPartialSearchResults, + allowPartialSequenceResults, + expectShardFailures + ); } } diff --git a/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlMissingEventsIT.java b/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlMissingEventsIT.java index 05557fb4883b3..4f1faf3322e7f 100644 --- a/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlMissingEventsIT.java +++ b/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlMissingEventsIT.java @@ -27,8 +27,28 @@ protected String getTestRestCluster() { return cluster.getHttpAddresses(); } - public EqlMissingEventsIT(String query, String name, List<long[]> eventIds, String[] joinKeys, Integer size, Integer maxSamplesPerKey) { - super(query, name, eventIds, joinKeys, size, maxSamplesPerKey); + public EqlMissingEventsIT( + String query, + String name, + List<long[]> eventIds, + String[] joinKeys, + Integer size, + Integer maxSamplesPerKey, + Boolean
allowPartialSearchResults, + Boolean allowPartialSequenceResults, + Boolean expectShardFailures + ) { + super( + query, + name, + eventIds, + joinKeys, + size, + maxSamplesPerKey, + allowPartialSearchResults, + allowPartialSequenceResults, + expectShardFailures + ); } } diff --git a/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlSampleIT.java b/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlSampleIT.java index dc2c653fad89e..c0bce3ffc9e4f 100644 --- a/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlSampleIT.java +++ b/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlSampleIT.java @@ -27,8 +27,28 @@ protected String getTestRestCluster() { return cluster.getHttpAddresses(); } - public EqlSampleIT(String query, String name, List<long[]> eventIds, String[] joinKeys, Integer size, Integer maxSamplesPerKey) { - super(query, name, eventIds, joinKeys, size, maxSamplesPerKey); + public EqlSampleIT( + String query, + String name, + List<long[]> eventIds, + String[] joinKeys, + Integer size, + Integer maxSamplesPerKey, + Boolean allowPartialSearchResults, + Boolean allowPartialSequenceResults, + Boolean expectShardFailures + ) { + super( + query, + name, + eventIds, + joinKeys, + size, + maxSamplesPerKey, + allowPartialSearchResults, + allowPartialSequenceResults, + expectShardFailures + ); } } diff --git a/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlSampleMultipleEntriesIT.java b/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlSampleMultipleEntriesIT.java index af1ade9120bbd..f50ee36095ae0 100644 --- a/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlSampleMultipleEntriesIT.java +++ b/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlSampleMultipleEntriesIT.java @@ -33,9 +33,22 @@ public EqlSampleMultipleEntriesIT( List<long[]> eventIds, String[] joinKeys, Integer size, - Integer maxSamplesPerKey + Integer maxSamplesPerKey, + Boolean allowPartialSearchResults, + Boolean allowPartialSequenceResults, + Boolean expectShardFailures ) { - super(query, name, eventIds, joinKeys, size, maxSamplesPerKey); + super( + query, + name, + eventIds, + joinKeys, + size, + maxSamplesPerKey, + allowPartialSearchResults, + allowPartialSequenceResults, + expectShardFailures + ); } } diff --git a/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlSpecFailingShardsIT.java b/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlSpecFailingShardsIT.java new file mode 100644 index 0000000000000..cf05811a77857 --- /dev/null +++ b/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlSpecFailingShardsIT.java @@ -0,0 +1,53 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0.
+ */ + +package org.elasticsearch.xpack.eql; + +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; + +import org.elasticsearch.test.TestClustersThreadFilter; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.eql.EqlSpecFailingShardsTestCase; +import org.junit.ClassRule; + +import java.util.List; + +@ThreadLeakFilters(filters = TestClustersThreadFilter.class) +public class EqlSpecFailingShardsIT extends EqlSpecFailingShardsTestCase { + + @ClassRule + public static final ElasticsearchCluster cluster = EqlTestCluster.CLUSTER; + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + + public EqlSpecFailingShardsIT( + String query, + String name, + List<long[]> eventIds, + String[] joinKeys, + Integer size, + Integer maxSamplesPerKey, + Boolean allowPartialSearchResults, + Boolean allowPartialSequenceResults, + Boolean expectShardFailures + ) { + super( + query, + name, + eventIds, + joinKeys, + size, + maxSamplesPerKey, + allowPartialSearchResults, + allowPartialSequenceResults, + expectShardFailures + ); + } +} diff --git a/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlSpecIT.java b/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlSpecIT.java index 7aac0ae336c8a..0aad5cc1b73da 100644 --- a/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlSpecIT.java +++ b/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlSpecIT.java @@ -27,7 +27,27 @@ protected String getTestRestCluster() { return cluster.getHttpAddresses(); } - public EqlSpecIT(String query, String name, List<long[]> eventIds, String[] joinKeys, Integer size, Integer maxSamplesPerKey) { - super(query, name, eventIds, joinKeys, size, maxSamplesPerKey); + public EqlSpecIT( + String query, + String name, + List<long[]> eventIds, + String[] joinKeys, + Integer size, + Integer maxSamplesPerKey, + Boolean allowPartialSearchResults, + Boolean allowPartialSequenceResults, + Boolean expectShardFailures + ) { + super( + query, + name, + eventIds, + joinKeys, + size, + maxSamplesPerKey, + allowPartialSearchResults, + allowPartialSequenceResults, + expectShardFailures + ); } } diff --git a/x-pack/plugin/eql/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/eql/10_basic.yml b/x-pack/plugin/eql/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/eql/10_basic.yml index e49264d76d5e9..c7974f3b584b4 100644 --- a/x-pack/plugin/eql/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/eql/10_basic.yml +++ b/x-pack/plugin/eql/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/eql/10_basic.yml @@ -83,6 +83,34 @@ setup: id: 123 valid: true + - do: + indices.create: + index: eql_test_rebel + body: + mappings: + properties: + some_keyword: + type: keyword + runtime: + day_of_week: + type: keyword + script: + source: "throw new IllegalArgumentException(\"rebel shards\")" + - do: + bulk: + refresh: true + body: + - index: + _index: eql_test_rebel + _id: "1" + - event: + - category: process + "@timestamp": 2020-02-03T12:34:56Z + user: SYSTEM + id: 123 + valid: false + some_keyword: longer than normal + --- # Testing round-trip and the basic shape of the response "Execute some EQL.": @@ -478,3 +506,118 @@ setup: query: 'sequence with maxspan=10d [network where user == "ADMIN"] ![network where used == "SYSTEM"]' - match: { error.root_cause.0.type: "verification_exception" } - match: { error.root_cause.0.reason: "Found 1 problem\nline 1:75: Unknown column
[used], did you mean [user]?" } + + +--- +"Execute query shard failures and with allow_partial_search_results": + - do: + eql.search: + index: eql_test* + body: + query: 'process where user == "SYSTEM" and day_of_week == "Monday"' + fields: [{"field":"@timestamp","format":"epoch_millis"},"id","valid","day_of_week"] + allow_partial_search_results: true + + - match: {timed_out: false} + - match: {hits.total.value: 1} + - match: {hits.total.relation: "eq"} + - match: {hits.events.0._source.user: "SYSTEM"} + - match: {hits.events.0._id: "1"} + - match: {hits.events.0.fields.@timestamp: ["1580733296000"]} + - match: {hits.events.0.fields.id: [123]} + - match: {hits.events.0.fields.valid: [false]} + - match: {hits.events.0.fields.day_of_week: ["Monday"]} + - match: {shard_failures.0.index: "eql_test_rebel"} + + +--- +"Execute query shard failures and with allow_partial_search_results as request param": + - do: + eql.search: + index: eql_test* + allow_partial_search_results: true + body: + query: 'process where user == "SYSTEM" and day_of_week == "Monday"' + fields: [{"field":"@timestamp","format":"epoch_millis"},"id","valid","day_of_week"] + + - match: {timed_out: false} + - match: {hits.total.value: 1} + - match: {hits.total.relation: "eq"} + - match: {hits.events.0._source.user: "SYSTEM"} + - match: {hits.events.0._id: "1"} + - match: {hits.events.0.fields.@timestamp: ["1580733296000"]} + - match: {hits.events.0.fields.id: [123]} + - match: {hits.events.0.fields.valid: [false]} + - match: {hits.events.0.fields.day_of_week: ["Monday"]} + - match: {shard_failures.0.index: "eql_test_rebel"} + + +--- +"Execute sequence with shard failures and allow_partial_search_results=true": + - do: + eql.search: + index: eql_test* + body: + query: 'sequence [process where user == "SYSTEM" and day_of_week == "Monday"] [process where user == "SYSTEM" and day_of_week == "Tuesday"]' + fields: [{"field":"@timestamp","format":"epoch_millis"},"id","valid","day_of_week"] + allow_partial_search_results: true + + - match: {timed_out: false} + - match: {hits.total.value: 0} + - match: {shard_failures.0.index: "eql_test_rebel"} + + +--- +"Execute sequence with shard failures, allow_partial_search_results=true and allow_partial_sequence_results=true": + - do: + eql.search: + index: eql_test* + body: + query: 'sequence [process where user == "SYSTEM" and day_of_week == "Monday"] [process where user == "SYSTEM" and day_of_week == "Tuesday"]' + fields: [{"field":"@timestamp","format":"epoch_millis"},"id","valid","day_of_week"] + allow_partial_search_results: true + allow_partial_sequence_results: true + + - match: {timed_out: false} + - match: {hits.total.value: 1} + - match: {hits.total.relation: "eq"} + - match: {hits.sequences.0.events.0._source.user: "SYSTEM"} + - match: {hits.sequences.0.events.0._id: "1"} + - match: {hits.sequences.0.events.0.fields.@timestamp: ["1580733296000"]} + - match: {hits.sequences.0.events.0.fields.id: [123]} + - match: {hits.sequences.0.events.0.fields.valid: [false]} + - match: {hits.sequences.0.events.0.fields.day_of_week: ["Monday"]} + - match: {hits.sequences.0.events.1._id: "2"} + - match: {hits.sequences.0.events.1.fields.@timestamp: ["1580819696000"]} + - match: {hits.sequences.0.events.1.fields.id: [123]} + - match: {hits.sequences.0.events.1.fields.valid: [true]} + - match: {hits.sequences.0.events.1.fields.day_of_week: ["Tuesday"]} + - match: {shard_failures.0.index: "eql_test_rebel"} + + +--- +"Execute sequence with shard failures, allow_partial_search_results=true and 
allow_partial_sequence_results=true as query params": + - do: + eql.search: + index: eql_test* + allow_partial_search_results: true + allow_partial_sequence_results: true + body: + query: 'sequence [process where user == "SYSTEM" and day_of_week == "Monday"] [process where user == "SYSTEM" and day_of_week == "Tuesday"]' + fields: [{"field":"@timestamp","format":"epoch_millis"},"id","valid","day_of_week"] + + - match: {timed_out: false} + - match: {hits.total.value: 1} + - match: {hits.total.relation: "eq"} + - match: {hits.sequences.0.events.0._source.user: "SYSTEM"} + - match: {hits.sequences.0.events.0._id: "1"} + - match: {hits.sequences.0.events.0.fields.@timestamp: ["1580733296000"]} + - match: {hits.sequences.0.events.0.fields.id: [123]} + - match: {hits.sequences.0.events.0.fields.valid: [false]} + - match: {hits.sequences.0.events.0.fields.day_of_week: ["Monday"]} + - match: {hits.sequences.0.events.1._id: "2"} + - match: {hits.sequences.0.events.1.fields.@timestamp: ["1580819696000"]} + - match: {hits.sequences.0.events.1.fields.id: [123]} + - match: {hits.sequences.0.events.1.fields.valid: [true]} + - match: {hits.sequences.0.events.1.fields.day_of_week: ["Tuesday"]} + - match: {shard_failures.0.index: "eql_test_rebel"} diff --git a/x-pack/plugin/eql/src/internalClusterTest/java/org/elasticsearch/xpack/eql/action/CCSPartialResultsIT.java b/x-pack/plugin/eql/src/internalClusterTest/java/org/elasticsearch/xpack/eql/action/CCSPartialResultsIT.java new file mode 100644 index 0000000000000..da6bb6180428b --- /dev/null +++ b/x-pack/plugin/eql/src/internalClusterTest/java/org/elasticsearch/xpack/eql/action/CCSPartialResultsIT.java @@ -0,0 +1,613 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.eql.action; + +import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsAction; +import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.AbstractMultiClustersTestCase; +import org.elasticsearch.xpack.eql.plugin.EqlPlugin; + +import java.io.IOException; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.ExecutionException; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; + +public class CCSPartialResultsIT extends AbstractMultiClustersTestCase { + + static String REMOTE_CLUSTER = "cluster_a"; + + protected Collection<Class<? extends Plugin>> nodePlugins(String cluster) { + return Collections.singletonList(LocalStateEQLXPackPlugin.class); + } + + protected final Client localClient() { + return client(LOCAL_CLUSTER); + } + + @Override + protected List<String> remoteClusterAlias() { + return List.of(REMOTE_CLUSTER); + } + + @Override + protected boolean reuseClusters() { + return false; + } + + /** + * + * @return remote node name + */ + private String createSchema() { + final Client remoteClient = client(REMOTE_CLUSTER); + final String remoteNode = cluster(REMOTE_CLUSTER).startDataOnlyNode(); + final String remoteNode2 = cluster(REMOTE_CLUSTER).startDataOnlyNode(); + + assertAcked( + remoteClient.admin() + .indices() + .prepareCreate("test-1-remote") + .setSettings( + Settings.builder() + .put("index.routing.allocation.require._name", remoteNode) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .build() + ) + .setMapping("@timestamp", "type=date"), + TimeValue.timeValueSeconds(60) + ); + + assertAcked( + remoteClient.admin() + .indices() + .prepareCreate("test-2-remote") + .setSettings( + Settings.builder() + .put("index.routing.allocation.require._name", remoteNode2) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .build() + ) + .setMapping("@timestamp", "type=date"), + TimeValue.timeValueSeconds(60) + ); + + for (int i = 0; i < 5; i++) { + int val = i * 2; + remoteClient.prepareIndex("test-1-remote") + .setId(Integer.toString(i)) + .setSource("@timestamp", 100000 + val, "event.category", "process", "key", "same", "value", val) + .get(); + } + for (int i = 0; i < 5; i++) { + int val = i * 2 + 1; + remoteClient.prepareIndex("test-2-remote") + .setId(Integer.toString(i)) + .setSource("@timestamp", 100000 + val, "event.category", "process", "key", "same", "value", val) + .get(); + } + + remoteClient.admin().indices().prepareRefresh().get(); + return remoteNode; + } + + // ------------------------------------------------------------------------ + // queries with full cluster (no missing shards) + // ------------------------------------------------------------------------ + + public void testNoFailures() throws ExecutionException, InterruptedException, IOException { + createSchema(); + + // event query + var request = new EqlSearchRequest().indices(REMOTE_CLUSTER + ":test-*") + .query("process where true") + .allowPartialSearchResults(randomBoolean()) + .allowPartialSequenceResults(randomBoolean()); + EqlSearchResponse
response = localClient().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().events().size(), equalTo(10)); + for (int i = 0; i < 10; i++) { + assertThat(response.hits().events().get(i).toString(), containsString("\"value\" : " + i)); + } + assertThat(response.shardFailures().length, is(0)); + + // sequence query on both shards + request = new EqlSearchRequest().indices(REMOTE_CLUSTER + ":test-*") + .query("sequence [process where value == 1] [process where value == 2]") + .allowPartialSearchResults(randomBoolean()) + .allowPartialSequenceResults(randomBoolean()); + response = localClient().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(1)); + EqlSearchResponse.Sequence sequence = response.hits().sequences().get(0); + assertThat(sequence.events().get(0).toString(), containsString("\"value\" : 1")); + assertThat(sequence.events().get(1).toString(), containsString("\"value\" : 2")); + assertThat(response.shardFailures().length, is(0)); + + // sequence query on the available shard only + request = new EqlSearchRequest().indices(REMOTE_CLUSTER + ":test-*") + .query("sequence [process where value == 1] [process where value == 3]") + .allowPartialSearchResults(randomBoolean()) + .allowPartialSequenceResults(randomBoolean()); + response = localClient().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(1)); + sequence = response.hits().sequences().get(0); + assertThat(sequence.events().get(0).toString(), containsString("\"value\" : 1")); + assertThat(sequence.events().get(1).toString(), containsString("\"value\" : 3")); + assertThat(response.shardFailures().length, is(0)); + + // sequence query on the unavailable shard only + request = new EqlSearchRequest().indices(REMOTE_CLUSTER + ":test-*") + .query("sequence [process where value == 0] [process where value == 2]") + .allowPartialSearchResults(randomBoolean()) + .allowPartialSequenceResults(randomBoolean()); + response = localClient().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(1)); + sequence = response.hits().sequences().get(0); + assertThat(sequence.events().get(0).toString(), containsString("\"value\" : 0")); + assertThat(sequence.events().get(1).toString(), containsString("\"value\" : 2")); + assertThat(response.shardFailures().length, is(0)); + + // sequence query with missing event on unavailable shard + request = new EqlSearchRequest().indices(REMOTE_CLUSTER + ":test-*") + .query("sequence with maxspan=10s [process where value == 1] ![process where value == 2] [process where value == 3]") + .allowPartialSearchResults(randomBoolean()) + .allowPartialSequenceResults(randomBoolean()); + response = localClient().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(0)); + assertThat(response.shardFailures().length, is(0)); + + // sample query on both shards + request = new EqlSearchRequest().indices(REMOTE_CLUSTER + ":test-*") + .query("sample by key [process where value == 2] [process where value == 1]") + .allowPartialSearchResults(randomBoolean()) + .allowPartialSequenceResults(randomBoolean()); + response = localClient().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(1)); + EqlSearchResponse.Sequence sample = response.hits().sequences().get(0); + assertThat(sample.events().get(0).toString(), containsString("\"value\" : 
2")); + assertThat(sample.events().get(1).toString(), containsString("\"value\" : 1")); + assertThat(response.shardFailures().length, is(0)); + + // sample query on the available shard only + request = new EqlSearchRequest().indices(REMOTE_CLUSTER + ":test-*") + .query("sample by key [process where value == 3] [process where value == 1]") + .allowPartialSearchResults(randomBoolean()) + .allowPartialSequenceResults(randomBoolean()); + response = localClient().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(1)); + sample = response.hits().sequences().get(0); + assertThat(sample.events().get(0).toString(), containsString("\"value\" : 3")); + assertThat(sample.events().get(1).toString(), containsString("\"value\" : 1")); + assertThat(response.shardFailures().length, is(0)); + + // sample query on the unavailable shard only + request = new EqlSearchRequest().indices(REMOTE_CLUSTER + ":test-*") + .query("sample by key [process where value == 2] [process where value == 0]") + .allowPartialSearchResults(randomBoolean()) + .allowPartialSequenceResults(randomBoolean()); + response = localClient().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(1)); + sample = response.hits().sequences().get(0); + assertThat(sample.events().get(0).toString(), containsString("\"value\" : 2")); + assertThat(sample.events().get(1).toString(), containsString("\"value\" : 0")); + assertThat(response.shardFailures().length, is(0)); + + } + + // ------------------------------------------------------------------------ + // same queries, with missing shards and allow_partial_search_results=true + // and allow_partial_sequence_result=true + // ------------------------------------------------------------------------ + + public void testAllowPartialSearchAndSequence_event() throws ExecutionException, InterruptedException, IOException { + var remoteNode = createSchema(); + // ------------------------------------------------------------------------ + // stop one of the nodes, make one of the shards unavailable + // ------------------------------------------------------------------------ + + cluster(REMOTE_CLUSTER).stopNode(remoteNode); + + // event query + var request = new EqlSearchRequest().indices(REMOTE_CLUSTER + ":test-*") + .query("process where true") + .allowPartialSearchResults(true); + var response = localClient().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().events().size(), equalTo(5)); + for (int i = 0; i < 5; i++) { + assertThat(response.hits().events().get(i).toString(), containsString("\"value\" : " + (i * 2 + 1))); + } + assertThat(response.shardFailures().length, is(1)); + } + + public void testAllowPartialSearchAndSequence_sequence() throws ExecutionException, InterruptedException, IOException { + var remoteNode = createSchema(); + // ------------------------------------------------------------------------ + // stop one of the nodes, make one of the shards unavailable + // ------------------------------------------------------------------------ + + cluster(REMOTE_CLUSTER).stopNode(remoteNode); + + // sequence query on both shards + var request = new EqlSearchRequest().indices(REMOTE_CLUSTER + ":test-*") + .query("sequence [process where value == 1] [process where value == 2]") + .allowPartialSearchResults(true) + .allowPartialSequenceResults(true); + var response = localClient().execute(EqlSearchAction.INSTANCE, request).get(); + 
assertThat(response.hits().sequences().size(), equalTo(0)); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1-remote")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + // sequence query on the available shard only + request = new EqlSearchRequest().indices(REMOTE_CLUSTER + ":test-*") + .query("sequence [process where value == 1] [process where value == 3]") + .allowPartialSearchResults(true) + .allowPartialSequenceResults(true); + response = localClient().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(1)); + var sequence = response.hits().sequences().get(0); + assertThat(sequence.events().get(0).toString(), containsString("\"value\" : 1")); + assertThat(sequence.events().get(1).toString(), containsString("\"value\" : 3")); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1-remote")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + // sequence query on the unavailable shard only + request = new EqlSearchRequest().indices(REMOTE_CLUSTER + ":test-*") + .query("sequence [process where value == 0] [process where value == 2]") + .allowPartialSearchResults(true) + .allowPartialSequenceResults(true); + response = localClient().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(0)); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1-remote")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + // sequence query with missing event on unavailable shard. 
THIS IS A FALSE POSITIVE + request = new EqlSearchRequest().indices(REMOTE_CLUSTER + ":test-*") + .query("sequence with maxspan=10s [process where value == 1] ![process where value == 2] [process where value == 3]") + .allowPartialSearchResults(true) + .allowPartialSequenceResults(true); + response = localClient().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(1)); + sequence = response.hits().sequences().get(0); + assertThat(sequence.events().get(0).toString(), containsString("\"value\" : 1")); + assertThat(sequence.events().get(2).toString(), containsString("\"value\" : 3")); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1-remote")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + } + + public void testAllowPartialSearchAndSequence_sample() throws ExecutionException, InterruptedException, IOException { + var remoteNode = createSchema(); + // ------------------------------------------------------------------------ + // stop one of the nodes, make one of the shards unavailable + // ------------------------------------------------------------------------ + + cluster(REMOTE_CLUSTER).stopNode(remoteNode); + + // sample query on both shards + var request = new EqlSearchRequest().indices(REMOTE_CLUSTER + ":test-*") + .query("sample by key [process where value == 2] [process where value == 1]") + .allowPartialSearchResults(true) + .allowPartialSequenceResults(true); + var response = localClient().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(0)); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1-remote")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + // sample query on the available shard only + request = new EqlSearchRequest().indices(REMOTE_CLUSTER + ":test-*") + .query("sample by key [process where value == 3] [process where value == 1]") + .allowPartialSearchResults(true) + .allowPartialSequenceResults(true); + response = localClient().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(1)); + var sample = response.hits().sequences().get(0); + assertThat(sample.events().get(0).toString(), containsString("\"value\" : 3")); + assertThat(sample.events().get(1).toString(), containsString("\"value\" : 1")); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1-remote")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + // sample query on the unavailable shard only + request = new EqlSearchRequest().indices(REMOTE_CLUSTER + ":test-*") + .query("sample by key [process where value == 2] [process where value == 0]") + .allowPartialSearchResults(true) + .allowPartialSequenceResults(true); + response = localClient().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(0)); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1-remote")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + } + + // ------------------------------------------------------------------------ + // same queries, with 
missing shards and allow_partial_search_results=true + // and default allow_partial_sequence_results (ie. false) + // ------------------------------------------------------------------------ + + public void testAllowPartialSearch_event() throws ExecutionException, InterruptedException, IOException { + var remoteNode = createSchema(); + // ------------------------------------------------------------------------ + // stop one of the nodes, make one of the shards unavailable + // ------------------------------------------------------------------------ + + cluster(REMOTE_CLUSTER).stopNode(remoteNode); + + // event query + var request = new EqlSearchRequest().indices(REMOTE_CLUSTER + ":test-*") + .query("process where true") + .allowPartialSearchResults(true); + var response = localClient().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().events().size(), equalTo(5)); + for (int i = 0; i < 5; i++) { + assertThat(response.hits().events().get(i).toString(), containsString("\"value\" : " + (i * 2 + 1))); + } + assertThat(response.shardFailures().length, is(1)); + + } + + public void testAllowPartialSearch_sequence() throws ExecutionException, InterruptedException, IOException { + var remoteNode = createSchema(); + // ------------------------------------------------------------------------ + // stop one of the nodes, make one of the shards unavailable + // ------------------------------------------------------------------------ + + cluster(REMOTE_CLUSTER).stopNode(remoteNode); + + // sequence query on both shards + var request = new EqlSearchRequest().indices(REMOTE_CLUSTER + ":test-*") + .query("sequence [process where value == 1] [process where value == 2]") + .allowPartialSearchResults(true); + var response = localClient().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(0)); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1-remote")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + // sequence query on the available shard only + request = new EqlSearchRequest().indices(REMOTE_CLUSTER + ":test-*") + .query("sequence [process where value == 1] [process where value == 3]") + .allowPartialSearchResults(true); + response = localClient().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(0)); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1-remote")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + // sequence query on the unavailable shard only + request = new EqlSearchRequest().indices(REMOTE_CLUSTER + ":test-*") + .query("sequence [process where value == 0] [process where value == 2]") + .allowPartialSearchResults(true); + response = localClient().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(0)); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1-remote")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + // sequence query with missing event on unavailable shard. 
NO FALSE POSITIVE HERE: allow_partial_sequence_results defaults to false, so the incomplete sequence is not returned + request = new EqlSearchRequest().indices(REMOTE_CLUSTER + ":test-*") + .query("sequence with maxspan=10s [process where value == 1] ![process where value == 2] [process where value == 3]") + .allowPartialSearchResults(true); + response = localClient().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(0)); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1-remote")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + } + + public void testAllowPartialSearch_sample() throws ExecutionException, InterruptedException, IOException { + var remoteNode = createSchema(); + // ------------------------------------------------------------------------ + // stop one of the nodes, make one of the shards unavailable + // ------------------------------------------------------------------------ + + cluster(REMOTE_CLUSTER).stopNode(remoteNode); + + // sample query on both shards + var request = new EqlSearchRequest().indices(REMOTE_CLUSTER + ":test-*") + .query("sample by key [process where value == 2] [process where value == 1]") + .allowPartialSearchResults(true); + var response = localClient().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(0)); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1-remote")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + // sample query on the available shard only + request = new EqlSearchRequest().indices(REMOTE_CLUSTER + ":test-*") + .query("sample by key [process where value == 3] [process where value == 1]") + .allowPartialSearchResults(true); + response = localClient().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(1)); + var sample = response.hits().sequences().get(0); + assertThat(sample.events().get(0).toString(), containsString("\"value\" : 3")); + assertThat(sample.events().get(1).toString(), containsString("\"value\" : 1")); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1-remote")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + // sample query on the unavailable shard only + request = new EqlSearchRequest().indices(REMOTE_CLUSTER + ":test-*") + .query("sample by key [process where value == 2] [process where value == 0]") + .allowPartialSearchResults(true); + response = localClient().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(0)); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1-remote")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + } + + // ------------------------------------------------------------------------ + // same queries, with missing shards and with default xpack.eql.default_allow_partial_results=true + // ------------------------------------------------------------------------ + + public void testClusterSetting_event() throws ExecutionException, InterruptedException, IOException { + var remoteNode = createSchema(); + // ------------------------------------------------------------------------ + //
stop one of the nodes, make one of the shards unavailable + // ------------------------------------------------------------------------ + + cluster(REMOTE_CLUSTER).stopNode(remoteNode); + + cluster(REMOTE_CLUSTER).client() + .execute( + ClusterUpdateSettingsAction.INSTANCE, + new ClusterUpdateSettingsRequest(TimeValue.THIRTY_SECONDS, TimeValue.THIRTY_SECONDS).persistentSettings( + Settings.builder().put(EqlPlugin.DEFAULT_ALLOW_PARTIAL_SEARCH_RESULTS.getKey(), true) + ) + ) + .get(); + + // event query + var request = new EqlSearchRequest().indices(REMOTE_CLUSTER + ":test-*").query("process where true"); + var response = localClient().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().events().size(), equalTo(5)); + for (int i = 0; i < 5; i++) { + assertThat(response.hits().events().get(i).toString(), containsString("\"value\" : " + (i * 2 + 1))); + } + assertThat(response.shardFailures().length, is(1)); + + localClient().execute( + ClusterUpdateSettingsAction.INSTANCE, + new ClusterUpdateSettingsRequest(TimeValue.THIRTY_SECONDS, TimeValue.THIRTY_SECONDS).persistentSettings( + Settings.builder().putNull(EqlPlugin.DEFAULT_ALLOW_PARTIAL_SEARCH_RESULTS.getKey()) + ) + ).get(); + } + + public void testClusterSetting_sequence() throws ExecutionException, InterruptedException, IOException { + var remoteNode = createSchema(); + // ------------------------------------------------------------------------ + // stop one of the nodes, make one of the shards unavailable + // ------------------------------------------------------------------------ + + cluster(REMOTE_CLUSTER).stopNode(remoteNode); + + cluster(REMOTE_CLUSTER).client() + .execute( + ClusterUpdateSettingsAction.INSTANCE, + new ClusterUpdateSettingsRequest(TimeValue.THIRTY_SECONDS, TimeValue.THIRTY_SECONDS).persistentSettings( + Settings.builder().put(EqlPlugin.DEFAULT_ALLOW_PARTIAL_SEARCH_RESULTS.getKey(), true) + ) + ) + .get(); + // sequence query on both shards + var request = new EqlSearchRequest().indices(REMOTE_CLUSTER + ":test-*") + .query("sequence [process where value == 1] [process where value == 2]"); + var response = localClient().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(0)); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1-remote")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + // sequence query on the available shard only + request = new EqlSearchRequest().indices(REMOTE_CLUSTER + ":test-*") + .query("sequence [process where value == 1] [process where value == 3]"); + response = localClient().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(0)); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1-remote")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + // sequence query on the unavailable shard only + request = new EqlSearchRequest().indices(REMOTE_CLUSTER + ":test-*") + .query("sequence [process where value == 0] [process where value == 2]"); + response = localClient().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(0)); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1-remote")); + 
assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + // sequence query with missing event on unavailable shard. NO FALSE POSITIVE HERE: allow_partial_sequence_results defaults to false, so the incomplete sequence is not returned + request = new EqlSearchRequest().indices(REMOTE_CLUSTER + ":test-*") + .query("sequence with maxspan=10s [process where value == 1] ![process where value == 2] [process where value == 3]"); + response = localClient().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(0)); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1-remote")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + localClient().execute( + ClusterUpdateSettingsAction.INSTANCE, + new ClusterUpdateSettingsRequest(TimeValue.THIRTY_SECONDS, TimeValue.THIRTY_SECONDS).persistentSettings( + Settings.builder().putNull(EqlPlugin.DEFAULT_ALLOW_PARTIAL_SEARCH_RESULTS.getKey()) + ) + ).get(); + } + + public void testClusterSetting_sample() throws ExecutionException, InterruptedException, IOException { + var remoteNode = createSchema(); + // ------------------------------------------------------------------------ + // stop one of the nodes, make one of the shards unavailable + // ------------------------------------------------------------------------ + + cluster(REMOTE_CLUSTER).stopNode(remoteNode); + + cluster(REMOTE_CLUSTER).client() + .execute( + ClusterUpdateSettingsAction.INSTANCE, + new ClusterUpdateSettingsRequest(TimeValue.THIRTY_SECONDS, TimeValue.THIRTY_SECONDS).persistentSettings( + Settings.builder().put(EqlPlugin.DEFAULT_ALLOW_PARTIAL_SEARCH_RESULTS.getKey(), true) + ) + ) + .get(); + + // sample query on both shards + var request = new EqlSearchRequest().indices(REMOTE_CLUSTER + ":test-*") + .query("sample by key [process where value == 2] [process where value == 1]"); + var response = localClient().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(0)); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1-remote")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + // sample query on the available shard only + request = new EqlSearchRequest().indices(REMOTE_CLUSTER + ":test-*") + .query("sample by key [process where value == 3] [process where value == 1]"); + response = localClient().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(1)); + var sample = response.hits().sequences().get(0); + assertThat(sample.events().get(0).toString(), containsString("\"value\" : 3")); + assertThat(sample.events().get(1).toString(), containsString("\"value\" : 1")); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1-remote")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + // sample query on the unavailable shard only + request = new EqlSearchRequest().indices(REMOTE_CLUSTER + ":test-*") + .query("sample by key [process where value == 2] [process where value == 0]"); + response = localClient().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(0)); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1-remote"));
+ assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + localClient().execute( + ClusterUpdateSettingsAction.INSTANCE, + new ClusterUpdateSettingsRequest(TimeValue.THIRTY_SECONDS, TimeValue.THIRTY_SECONDS).persistentSettings( + Settings.builder().putNull(EqlPlugin.DEFAULT_ALLOW_PARTIAL_SEARCH_RESULTS.getKey()) + ) + ).get(); + } +} diff --git a/x-pack/plugin/eql/src/internalClusterTest/java/org/elasticsearch/xpack/eql/action/PartialSearchResultsIT.java b/x-pack/plugin/eql/src/internalClusterTest/java/org/elasticsearch/xpack/eql/action/PartialSearchResultsIT.java new file mode 100644 index 0000000000000..9048d11f4eddf --- /dev/null +++ b/x-pack/plugin/eql/src/internalClusterTest/java/org/elasticsearch/xpack/eql/action/PartialSearchResultsIT.java @@ -0,0 +1,780 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.eql.action; + +import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsAction; +import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; +import org.elasticsearch.action.search.SearchPhaseExecutionException; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.CollectionUtils; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.search.SearchService; +import org.elasticsearch.test.transport.MockTransportService; +import org.elasticsearch.xpack.core.async.GetAsyncResultRequest; +import org.elasticsearch.xpack.eql.plugin.EqlAsyncGetResultAction; +import org.elasticsearch.xpack.eql.plugin.EqlPlugin; + +import java.util.Collection; +import java.util.List; +import java.util.concurrent.ExecutionException; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; + +public class PartialSearchResultsIT extends AbstractEqlIntegTestCase { + + @Override + protected Collection<Class<? extends Plugin>> nodePlugins() { + return CollectionUtils.appendToCopy(super.nodePlugins(), MockTransportService.TestPlugin.class); + } + + @Override + protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal, otherSettings)) + .put(SearchService.KEEPALIVE_INTERVAL_SETTING.getKey(), TimeValue.timeValueMillis(randomIntBetween(100, 500))) + .build(); + } + + /** + * + * @return node name where the first index is + */ + private String createSchema() { + internalCluster().ensureAtLeastNumDataNodes(2); + final List<String> dataNodes = internalCluster().clusterService() + .state() + .nodes() + .getDataNodes() + .values() + .stream() + .map(DiscoveryNode::getName) + .toList(); + final String assignedNodeForIndex1 = randomFrom(dataNodes); + + assertAcked( + indicesAdmin().prepareCreate("test-1") + .setSettings( + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put("index.routing.allocation.include._name", assignedNodeForIndex1) + .build() + ) + .setMapping("@timestamp", "type=date") + ); +
assertAcked( + indicesAdmin().prepareCreate("test-2") + .setSettings( + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put("index.routing.allocation.exclude._name", assignedNodeForIndex1) + .build() + ) + .setMapping("@timestamp", "type=date") + ); + + for (int i = 0; i < 5; i++) { + int val = i * 2; + prepareIndex("test-1").setId(Integer.toString(i)) + .setSource("@timestamp", 100000 + val, "event.category", "process", "key", "same", "value", val) + .get(); + } + for (int i = 0; i < 5; i++) { + int val = i * 2 + 1; + prepareIndex("test-2").setId(Integer.toString(i)) + .setSource("@timestamp", 100000 + val, "event.category", "process", "key", "same", "value", val) + .get(); + } + refresh(); + return assignedNodeForIndex1; + } + + public void testNoFailures() throws Exception { + createSchema(); + + // ------------------------------------------------------------------------ + // queries with full cluster (no missing shards) + // ------------------------------------------------------------------------ + + // event query + var request = new EqlSearchRequest().indices("test-*") + .query("process where true") + .allowPartialSearchResults(randomBoolean()) + .allowPartialSequenceResults(randomBoolean()); + EqlSearchResponse response = client().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().events().size(), equalTo(10)); + for (int i = 0; i < 10; i++) { + assertThat(response.hits().events().get(i).toString(), containsString("\"value\" : " + i)); + } + assertThat(response.shardFailures().length, is(0)); + + // sequence query on both shards + request = new EqlSearchRequest().indices("test-*") + .query("sequence [process where value == 1] [process where value == 2]") + .allowPartialSearchResults(randomBoolean()) + .allowPartialSequenceResults(randomBoolean()); + response = client().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(1)); + EqlSearchResponse.Sequence sequence = response.hits().sequences().get(0); + assertThat(sequence.events().get(0).toString(), containsString("\"value\" : 1")); + assertThat(sequence.events().get(1).toString(), containsString("\"value\" : 2")); + assertThat(response.shardFailures().length, is(0)); + + // sequence query on the available shard only + request = new EqlSearchRequest().indices("test-*") + .query("sequence [process where value == 1] [process where value == 3]") + .allowPartialSearchResults(randomBoolean()) + .allowPartialSequenceResults(randomBoolean()); + response = client().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(1)); + sequence = response.hits().sequences().get(0); + assertThat(sequence.events().get(0).toString(), containsString("\"value\" : 1")); + assertThat(sequence.events().get(1).toString(), containsString("\"value\" : 3")); + assertThat(response.shardFailures().length, is(0)); + + // sequence query on the unavailable shard only + request = new EqlSearchRequest().indices("test-*") + .query("sequence [process where value == 0] [process where value == 2]") + .allowPartialSearchResults(randomBoolean()) + .allowPartialSequenceResults(randomBoolean()); + response = client().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(1)); + sequence = response.hits().sequences().get(0); + assertThat(sequence.events().get(0).toString(), containsString("\"value\" : 0")); + assertThat(sequence.events().get(1).toString(), 
containsString("\"value\" : 2")); + assertThat(response.shardFailures().length, is(0)); + + // sequence query with missing event on unavailable shard + request = new EqlSearchRequest().indices("test-*") + .query("sequence with maxspan=10s [process where value == 1] ![process where value == 2] [process where value == 3]") + .allowPartialSearchResults(randomBoolean()) + .allowPartialSequenceResults(randomBoolean()); + response = client().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(0)); + assertThat(response.shardFailures().length, is(0)); + + // sample query on both shards + request = new EqlSearchRequest().indices("test-*") + .query("sample by key [process where value == 2] [process where value == 1]") + .allowPartialSearchResults(randomBoolean()) + .allowPartialSequenceResults(randomBoolean()); + response = client().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(1)); + EqlSearchResponse.Sequence sample = response.hits().sequences().get(0); + assertThat(sample.events().get(0).toString(), containsString("\"value\" : 2")); + assertThat(sample.events().get(1).toString(), containsString("\"value\" : 1")); + assertThat(response.shardFailures().length, is(0)); + + // sample query on the available shard only + request = new EqlSearchRequest().indices("test-*") + .query("sample by key [process where value == 3] [process where value == 1]") + .allowPartialSearchResults(randomBoolean()) + .allowPartialSequenceResults(randomBoolean()); + response = client().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(1)); + sample = response.hits().sequences().get(0); + assertThat(sample.events().get(0).toString(), containsString("\"value\" : 3")); + assertThat(sample.events().get(1).toString(), containsString("\"value\" : 1")); + assertThat(response.shardFailures().length, is(0)); + + // sample query on the unavailable shard only + request = new EqlSearchRequest().indices("test-*") + .query("sample by key [process where value == 2] [process where value == 0]") + .allowPartialSearchResults(randomBoolean()) + .allowPartialSequenceResults(randomBoolean()); + response = client().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(1)); + sample = response.hits().sequences().get(0); + assertThat(sample.events().get(0).toString(), containsString("\"value\" : 2")); + assertThat(sample.events().get(1).toString(), containsString("\"value\" : 0")); + assertThat(response.shardFailures().length, is(0)); + + } + + // ------------------------------------------------------------------------ + // same queries, with missing shards. Let them fail + // allow_partial_sequence_results has no effect if allow_partial_search_results is not set to true.
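+ // (Editor's illustrative sketch, not part of the original change: setting only the sequence-level flag, e.g.
+ // new EqlSearchRequest().indices("test-*")
+ // .query("sequence [process where value == 1] [process where value == 2]")
+ // .allowPartialSequenceResults(true),
+ // is still expected to fail once a shard is unavailable, because allowPartialSearchResults was never enabled;
+ // the shouldFail helper used below asserts exactly that.)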
+ // ------------------------------------------------------------------------ + + public void testFailures_event() throws Exception { + final String assignedNodeForIndex1 = createSchema(); + // ------------------------------------------------------------------------ + // stop one of the nodes, make one of the shards unavailable + // ------------------------------------------------------------------------ + + internalCluster().stopNode(assignedNodeForIndex1); + + // event query + shouldFail("process where true"); + + } + + public void testFailures_sequence() throws Exception { + final String assignedNodeForIndex1 = createSchema(); + // ------------------------------------------------------------------------ + // stop one of the nodes, make one of the shards unavailable + // ------------------------------------------------------------------------ + + internalCluster().stopNode(assignedNodeForIndex1); + + // sequence query on both shards + shouldFail("sequence [process where value == 1] [process where value == 2]"); + + // sequence query on the available shard only + shouldFail("sequence [process where value == 1] [process where value == 3]"); + + // sequence query on the unavailable shard only + shouldFail("sequence [process where value == 0] [process where value == 2]"); + + // sequence query with missing event on unavailable shard. + shouldFail("sequence with maxspan=10s [process where value == 1] ![process where value == 2] [process where value == 3]"); + } + + public void testFailures_sample() throws Exception { + final String assignedNodeForIndex1 = createSchema(); + // ------------------------------------------------------------------------ + // stop one of the nodes, make one of the shards unavailable + // ------------------------------------------------------------------------ + + internalCluster().stopNode(assignedNodeForIndex1); + + // sample query on both shards + shouldFail("sample by key [process where value == 2] [process where value == 1]"); + + // sample query on the available shard only + shouldFail("sample by key [process where value == 3] [process where value == 1]"); + + // sample query on the unavailable shard only + shouldFail("sample by key [process where value == 2] [process where value == 0]"); + + } + + // ------------------------------------------------------------------------ + // same queries, with missing shards and allow_partial_search_results=true + // and allow_partial_sequence_result=true + // ------------------------------------------------------------------------ + + public void testAllowPartialSearchAndSequenceResults_event() throws Exception { + final String assignedNodeForIndex1 = createSchema(); + // ------------------------------------------------------------------------ + // stop one of the nodes, make one of the shards unavailable + // ------------------------------------------------------------------------ + + internalCluster().stopNode(assignedNodeForIndex1); + + // event query + var request = new EqlSearchRequest().indices("test-*").query("process where true").allowPartialSearchResults(true); + var response = client().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().events().size(), equalTo(5)); + for (int i = 0; i < 5; i++) { + assertThat(response.hits().events().get(i).toString(), containsString("\"value\" : " + (i * 2 + 1))); + } + assertThat(response.shardFailures().length, is(1)); + + } + + public void testAllowPartialSearchAndSequenceResults_sequence() throws Exception { + final String assignedNodeForIndex1 = 
createSchema(); + // ------------------------------------------------------------------------ + // stop one of the nodes, make one of the shards unavailable + // ------------------------------------------------------------------------ + + internalCluster().stopNode(assignedNodeForIndex1); + + // sequence query on both shards + var request = new EqlSearchRequest().indices("test-*") + .query("sequence [process where value == 1] [process where value == 2]") + .allowPartialSearchResults(true) + .allowPartialSequenceResults(true); + var response = client().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(0)); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + // sequence query on the available shard only + request = new EqlSearchRequest().indices("test-*") + .query("sequence [process where value == 1] [process where value == 3]") + .allowPartialSearchResults(true) + .allowPartialSequenceResults(true); + response = client().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(1)); + var sequence = response.hits().sequences().get(0); + assertThat(sequence.events().get(0).toString(), containsString("\"value\" : 1")); + assertThat(sequence.events().get(1).toString(), containsString("\"value\" : 3")); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + // sequence query on the unavailable shard only + request = new EqlSearchRequest().indices("test-*") + .query("sequence [process where value == 0] [process where value == 2]") + .allowPartialSearchResults(true) + .allowPartialSequenceResults(true); + response = client().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(0)); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + // sequence query with missing event on unavailable shard. 
THIS IS A FALSE POSITIVE + request = new EqlSearchRequest().indices("test-*") + .query("sequence with maxspan=10s [process where value == 1] ![process where value == 2] [process where value == 3]") + .allowPartialSearchResults(true) + .allowPartialSequenceResults(true); + response = client().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(1)); + sequence = response.hits().sequences().get(0); + assertThat(sequence.events().get(0).toString(), containsString("\"value\" : 1")); + assertThat(sequence.events().get(2).toString(), containsString("\"value\" : 3")); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + } + + public void testAllowPartialSearchAndSequenceResults_sample() throws Exception { + final String assignedNodeForIndex1 = createSchema(); + // ------------------------------------------------------------------------ + // stop one of the nodes, make one of the shards unavailable + // ------------------------------------------------------------------------ + + internalCluster().stopNode(assignedNodeForIndex1); + + // sample query on both shards + var request = new EqlSearchRequest().indices("test-*") + .query("sample by key [process where value == 2] [process where value == 1]") + .allowPartialSearchResults(true) + .allowPartialSequenceResults(true); + var response = client().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(0)); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + // sample query on the available shard only + request = new EqlSearchRequest().indices("test-*") + .query("sample by key [process where value == 3] [process where value == 1]") + .allowPartialSearchResults(true) + .allowPartialSequenceResults(true); + response = client().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(1)); + var sample = response.hits().sequences().get(0); + assertThat(sample.events().get(0).toString(), containsString("\"value\" : 3")); + assertThat(sample.events().get(1).toString(), containsString("\"value\" : 1")); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + // sample query on the unavailable shard only + request = new EqlSearchRequest().indices("test-*") + .query("sample by key [process where value == 2] [process where value == 0]") + .allowPartialSearchResults(true) + .allowPartialSequenceResults(true); + response = client().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(0)); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + } + + // ------------------------------------------------------------------------ + // same queries, with missing shards and allow_partial_search_results=true + // and default allow_partial_sequence_results (ie. 
false) + // ------------------------------------------------------------------------ + + public void testAllowPartialSearchResults_event() throws Exception { + final String assignedNodeForIndex1 = createSchema(); + // ------------------------------------------------------------------------ + // stop one of the nodes, make one of the shards unavailable + // ------------------------------------------------------------------------ + + internalCluster().stopNode(assignedNodeForIndex1); + + // event query + var request = new EqlSearchRequest().indices("test-*").query("process where true").allowPartialSearchResults(true); + var response = client().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().events().size(), equalTo(5)); + for (int i = 0; i < 5; i++) { + assertThat(response.hits().events().get(i).toString(), containsString("\"value\" : " + (i * 2 + 1))); + } + assertThat(response.shardFailures().length, is(1)); + + } + + public void testAllowPartialSearchResults_sequence() throws Exception { + final String assignedNodeForIndex1 = createSchema(); + // ------------------------------------------------------------------------ + // stop one of the nodes, make one of the shards unavailable + // ------------------------------------------------------------------------ + + internalCluster().stopNode(assignedNodeForIndex1); + + // sequence query on both shards + var request = new EqlSearchRequest().indices("test-*") + .query("sequence [process where value == 1] [process where value == 2]") + .allowPartialSearchResults(true); + var response = client().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(0)); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + // sequence query on the available shard only + request = new EqlSearchRequest().indices("test-*") + .query("sequence [process where value == 1] [process where value == 3]") + .allowPartialSearchResults(true); + response = client().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(0)); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + // sequence query on the unavailable shard only + request = new EqlSearchRequest().indices("test-*") + .query("sequence [process where value == 0] [process where value == 2]") + .allowPartialSearchResults(true); + response = client().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(0)); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + // sequence query with missing event on unavailable shard. 
THIS IS A FALSE POSITIVE + request = new EqlSearchRequest().indices("test-*") + .query("sequence with maxspan=10s [process where value == 1] ![process where value == 2] [process where value == 3]") + .allowPartialSearchResults(true); + response = client().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(0)); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + } + + public void testAllowPartialSearchResults_sample() throws Exception { + final String assignedNodeForIndex1 = createSchema(); + // ------------------------------------------------------------------------ + // stop one of the nodes, make one of the shards unavailable + // ------------------------------------------------------------------------ + + internalCluster().stopNode(assignedNodeForIndex1); + + // sample query on both shards + var request = new EqlSearchRequest().indices("test-*") + .query("sample by key [process where value == 2] [process where value == 1]") + .allowPartialSearchResults(true); + var response = client().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(0)); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + // sample query on the available shard only + request = new EqlSearchRequest().indices("test-*") + .query("sample by key [process where value == 3] [process where value == 1]") + .allowPartialSearchResults(true); + response = client().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(1)); + var sample = response.hits().sequences().get(0); + assertThat(sample.events().get(0).toString(), containsString("\"value\" : 3")); + assertThat(sample.events().get(1).toString(), containsString("\"value\" : 1")); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + // sample query on the unavailable shard only + request = new EqlSearchRequest().indices("test-*") + .query("sample by key [process where value == 2] [process where value == 0]") + .allowPartialSearchResults(true); + response = client().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(0)); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + } + + // ------------------------------------------------------------------------ + // same queries, this time async, with missing shards and allow_partial_search_results=true + // and default allow_partial_sequence_results (ie. 
false) + // ------------------------------------------------------------------------ + + public void testAsyncAllowPartialSearchResults_event() throws Exception { + final String assignedNodeForIndex1 = createSchema(); + // ------------------------------------------------------------------------ + // stop one of the nodes, make one of the shards unavailable + // ------------------------------------------------------------------------ + + internalCluster().stopNode(assignedNodeForIndex1); + + // event query + var response = runAsync("process where true", true); + assertThat(response.hits().events().size(), equalTo(5)); + for (int i = 0; i < 5; i++) { + assertThat(response.hits().events().get(i).toString(), containsString("\"value\" : " + (i * 2 + 1))); + } + assertThat(response.shardFailures().length, is(1)); + + } + + public void testAsyncAllowPartialSearchResults_sequence() throws Exception { + final String assignedNodeForIndex1 = createSchema(); + // ------------------------------------------------------------------------ + // stop one of the nodes, make one of the shards unavailable + // ------------------------------------------------------------------------ + + internalCluster().stopNode(assignedNodeForIndex1); + + // sequence query on both shards + var response = runAsync("sequence [process where value == 1] [process where value == 2]", true); + assertThat(response.hits().sequences().size(), equalTo(0)); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + // sequence query on the available shard only + response = runAsync("sequence [process where value == 1] [process where value == 3]", true); + assertThat(response.hits().sequences().size(), equalTo(0)); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + // sequence query on the unavailable shard only + response = runAsync("sequence [process where value == 0] [process where value == 2]", true); + assertThat(response.hits().sequences().size(), equalTo(0)); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + // sequence query with missing event on unavailable shard. 
THIS IS A FALSE POSITIVE + response = runAsync( + "sequence with maxspan=10s [process where value == 1] ![process where value == 2] [process where value == 3]", + true + ); + assertThat(response.hits().sequences().size(), equalTo(0)); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + } + + public void testAsyncAllowPartialSearchResults_sample() throws Exception { + final String assignedNodeForIndex1 = createSchema(); + // ------------------------------------------------------------------------ + // stop one of the nodes, make one of the shards unavailable + // ------------------------------------------------------------------------ + + internalCluster().stopNode(assignedNodeForIndex1); + // sample query on both shards + var response = runAsync("sample by key [process where value == 2] [process where value == 1]", true); + assertThat(response.hits().sequences().size(), equalTo(0)); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + // sample query on the available shard only + response = runAsync("sample by key [process where value == 3] [process where value == 1]", true); + assertThat(response.hits().sequences().size(), equalTo(1)); + var sample = response.hits().sequences().get(0); + assertThat(sample.events().get(0).toString(), containsString("\"value\" : 3")); + assertThat(sample.events().get(1).toString(), containsString("\"value\" : 1")); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + // sample query on the unavailable shard only + response = runAsync("sample by key [process where value == 2] [process where value == 0]", true); + assertThat(response.hits().sequences().size(), equalTo(0)); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + } + + // ------------------------------------------------------------------------ + // same queries, with missing shards and with default xpack.eql.default_allow_partial_results=true + // ------------------------------------------------------------------------ + + public void testClusterSetting_event() throws Exception { + final String assignedNodeForIndex1 = createSchema(); + // ------------------------------------------------------------------------ + // stop one of the nodes, make one of the shards unavailable + // ------------------------------------------------------------------------ + + internalCluster().stopNode(assignedNodeForIndex1); + + client().execute( + ClusterUpdateSettingsAction.INSTANCE, + new ClusterUpdateSettingsRequest(TimeValue.THIRTY_SECONDS, TimeValue.THIRTY_SECONDS).persistentSettings( + Settings.builder().put(EqlPlugin.DEFAULT_ALLOW_PARTIAL_SEARCH_RESULTS.getKey(), true) + ) + ).get(); + + // event query + var request = new EqlSearchRequest().indices("test-*").query("process where true"); + var response = client().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().events().size(), equalTo(5)); + for (int i 
= 0; i < 5; i++) { + assertThat(response.hits().events().get(i).toString(), containsString("\"value\" : " + (i * 2 + 1))); + } + assertThat(response.shardFailures().length, is(1)); + + client().execute( + ClusterUpdateSettingsAction.INSTANCE, + new ClusterUpdateSettingsRequest(TimeValue.THIRTY_SECONDS, TimeValue.THIRTY_SECONDS).persistentSettings( + Settings.builder().putNull(EqlPlugin.DEFAULT_ALLOW_PARTIAL_SEARCH_RESULTS.getKey()) + ) + ).get(); + } + + public void testClusterSetting_sequence() throws Exception { + final String assignedNodeForIndex1 = createSchema(); + // ------------------------------------------------------------------------ + // stop one of the nodes, make one of the shards unavailable + // ------------------------------------------------------------------------ + + internalCluster().stopNode(assignedNodeForIndex1); + + client().execute( + ClusterUpdateSettingsAction.INSTANCE, + new ClusterUpdateSettingsRequest(TimeValue.THIRTY_SECONDS, TimeValue.THIRTY_SECONDS).persistentSettings( + Settings.builder().put(EqlPlugin.DEFAULT_ALLOW_PARTIAL_SEARCH_RESULTS.getKey(), true) + ) + ).get(); + // sequence query on both shards + var request = new EqlSearchRequest().indices("test-*").query("sequence [process where value == 1] [process where value == 2]"); + var response = client().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(0)); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + // sequence query on the available shard only + request = new EqlSearchRequest().indices("test-*").query("sequence [process where value == 1] [process where value == 3]"); + response = client().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(0)); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + // sequence query on the unavailable shard only + request = new EqlSearchRequest().indices("test-*").query("sequence [process where value == 0] [process where value == 2]"); + response = client().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(0)); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + // sequence query with missing event on unavailable shard. 
THIS IS A FALSE POSITIVE + request = new EqlSearchRequest().indices("test-*") + .query("sequence with maxspan=10s [process where value == 1] ![process where value == 2] [process where value == 3]"); + response = client().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(0)); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + client().execute( + ClusterUpdateSettingsAction.INSTANCE, + new ClusterUpdateSettingsRequest(TimeValue.THIRTY_SECONDS, TimeValue.THIRTY_SECONDS).persistentSettings( + Settings.builder().putNull(EqlPlugin.DEFAULT_ALLOW_PARTIAL_SEARCH_RESULTS.getKey()) + ) + ).get(); + } + + public void testClusterSetting_sample() throws Exception { + final String assignedNodeForIndex1 = createSchema(); + // ------------------------------------------------------------------------ + // stop one of the nodes, make one of the shards unavailable + // ------------------------------------------------------------------------ + + internalCluster().stopNode(assignedNodeForIndex1); + + client().execute( + ClusterUpdateSettingsAction.INSTANCE, + new ClusterUpdateSettingsRequest(TimeValue.THIRTY_SECONDS, TimeValue.THIRTY_SECONDS).persistentSettings( + Settings.builder().put(EqlPlugin.DEFAULT_ALLOW_PARTIAL_SEARCH_RESULTS.getKey(), true) + ) + ).get(); + + // sample query on both shards + var request = new EqlSearchRequest().indices("test-*").query("sample by key [process where value == 2] [process where value == 1]"); + var response = client().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(0)); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + // sample query on the available shard only + request = new EqlSearchRequest().indices("test-*").query("sample by key [process where value == 3] [process where value == 1]"); + response = client().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(1)); + var sample = response.hits().sequences().get(0); + assertThat(sample.events().get(0).toString(), containsString("\"value\" : 3")); + assertThat(sample.events().get(1).toString(), containsString("\"value\" : 1")); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + // sample query on the unavailable shard only + request = new EqlSearchRequest().indices("test-*").query("sample by key [process where value == 2] [process where value == 0]"); + response = client().execute(EqlSearchAction.INSTANCE, request).get(); + assertThat(response.hits().sequences().size(), equalTo(0)); + assertThat(response.shardFailures().length, is(1)); + assertThat(response.shardFailures()[0].index(), is("test-1")); + assertThat(response.shardFailures()[0].reason(), containsString("NoShardAvailableActionException")); + + client().execute( + ClusterUpdateSettingsAction.INSTANCE, + new ClusterUpdateSettingsRequest(TimeValue.THIRTY_SECONDS, TimeValue.THIRTY_SECONDS).persistentSettings( + 
Settings.builder().putNull(EqlPlugin.DEFAULT_ALLOW_PARTIAL_SEARCH_RESULTS.getKey()) + ) + ).get(); + } + + private static EqlSearchResponse runAsync(String query, Boolean allowPartialSearchResults) throws InterruptedException, + ExecutionException { + EqlSearchRequest request; + EqlSearchResponse response; + request = new EqlSearchRequest().indices("test-*").query(query).waitForCompletionTimeout(TimeValue.ZERO); + if (allowPartialSearchResults != null) { + request = request.allowPartialSearchResults(allowPartialSearchResults); + } + response = client().execute(EqlSearchAction.INSTANCE, request).get(); + while (response.isRunning()) { + GetAsyncResultRequest getResultsRequest = new GetAsyncResultRequest(response.id()).setKeepAlive(TimeValue.timeValueMinutes(10)) + .setWaitForCompletionTimeout(TimeValue.timeValueMillis(10)); + response = client().execute(EqlAsyncGetResultAction.INSTANCE, getResultsRequest).get(); + } + return response; + } + + private static void shouldFail(String query) throws InterruptedException { + EqlSearchRequest request = new EqlSearchRequest().indices("test-*").query(query); + if (randomBoolean()) { + request = request.allowPartialSearchResults(false); + } + if (randomBoolean()) { + request = request.allowPartialSequenceResults(randomBoolean()); + } + try { + client().execute(EqlSearchAction.INSTANCE, request).get(); + fail(); + } catch (ExecutionException e) { + assertThat(e.getCause(), instanceOf(SearchPhaseExecutionException.class)); + } + } +} diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/action/EqlSearchRequest.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/action/EqlSearchRequest.java index 0aeddd525e317..5804e11b72ff5 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/action/EqlSearchRequest.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/action/EqlSearchRequest.java @@ -63,6 +63,8 @@ public class EqlSearchRequest extends ActionRequest implements IndicesRequest.Re private List fetchFields; private Map runtimeMappings = emptyMap(); private int maxSamplesPerKey = RequestDefaults.MAX_SAMPLES_PER_KEY; + private Boolean allowPartialSearchResults; + private Boolean allowPartialSequenceResults; // Async settings private TimeValue waitForCompletionTimeout = null; @@ -83,6 +85,8 @@ public class EqlSearchRequest extends ActionRequest implements IndicesRequest.Re static final String KEY_FETCH_FIELDS = "fields"; static final String KEY_RUNTIME_MAPPINGS = "runtime_mappings"; static final String KEY_MAX_SAMPLES_PER_KEY = "max_samples_per_key"; + static final String KEY_ALLOW_PARTIAL_SEARCH_RESULTS = "allow_partial_search_results"; + static final String KEY_ALLOW_PARTIAL_SEQUENCE_RESULTS = "allow_partial_sequence_results"; static final ParseField FILTER = new ParseField(KEY_FILTER); static final ParseField TIMESTAMP_FIELD = new ParseField(KEY_TIMESTAMP_FIELD); @@ -97,6 +101,8 @@ public class EqlSearchRequest extends ActionRequest implements IndicesRequest.Re static final ParseField RESULT_POSITION = new ParseField(KEY_RESULT_POSITION); static final ParseField FETCH_FIELDS_FIELD = SearchSourceBuilder.FETCH_FIELDS_FIELD; static final ParseField MAX_SAMPLES_PER_KEY = new ParseField(KEY_MAX_SAMPLES_PER_KEY); + static final ParseField ALLOW_PARTIAL_SEARCH_RESULTS = new ParseField(KEY_ALLOW_PARTIAL_SEARCH_RESULTS); + static final ParseField ALLOW_PARTIAL_SEQUENCE_RESULTS = new ParseField(KEY_ALLOW_PARTIAL_SEQUENCE_RESULTS); private static final ObjectParser PARSER = 
objectParser(EqlSearchRequest::new); @@ -135,6 +141,13 @@ public EqlSearchRequest(StreamInput in) throws IOException { if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_7_0)) { maxSamplesPerKey = in.readInt(); } + if (in.getTransportVersion().onOrAfter(TransportVersions.EQL_ALLOW_PARTIAL_SEARCH_RESULTS)) { + allowPartialSearchResults = in.readOptionalBoolean(); + allowPartialSequenceResults = in.readOptionalBoolean(); + } else { + allowPartialSearchResults = false; + allowPartialSequenceResults = false; + } } @Override @@ -245,6 +258,8 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(KEY_RUNTIME_MAPPINGS, runtimeMappings); } builder.field(KEY_MAX_SAMPLES_PER_KEY, maxSamplesPerKey); + builder.field(KEY_ALLOW_PARTIAL_SEARCH_RESULTS, allowPartialSearchResults); + builder.field(KEY_ALLOW_PARTIAL_SEQUENCE_RESULTS, allowPartialSequenceResults); return builder; } @@ -279,6 +294,8 @@ protected static ObjectParser objectParser parser.declareField(EqlSearchRequest::fetchFields, EqlSearchRequest::parseFetchFields, FETCH_FIELDS_FIELD, ValueType.VALUE_ARRAY); parser.declareObject(EqlSearchRequest::runtimeMappings, (p, c) -> p.map(), SearchSourceBuilder.RUNTIME_MAPPINGS_FIELD); parser.declareInt(EqlSearchRequest::maxSamplesPerKey, MAX_SAMPLES_PER_KEY); + parser.declareBoolean(EqlSearchRequest::allowPartialSearchResults, ALLOW_PARTIAL_SEARCH_RESULTS); + parser.declareBoolean(EqlSearchRequest::allowPartialSequenceResults, ALLOW_PARTIAL_SEQUENCE_RESULTS); return parser; } @@ -427,6 +444,24 @@ public EqlSearchRequest maxSamplesPerKey(int maxSamplesPerKey) { return this; } + public Boolean allowPartialSearchResults() { + return allowPartialSearchResults; + } + + public EqlSearchRequest allowPartialSearchResults(Boolean val) { + this.allowPartialSearchResults = val; + return this; + } + + public Boolean allowPartialSequenceResults() { + return allowPartialSequenceResults; + } + + public EqlSearchRequest allowPartialSequenceResults(Boolean val) { + this.allowPartialSequenceResults = val; + return this; + } + private static List parseFetchFields(XContentParser parser) throws IOException { List result = new ArrayList<>(); Token token = parser.currentToken(); @@ -470,6 +505,10 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_7_0)) { out.writeInt(maxSamplesPerKey); } + if (out.getTransportVersion().onOrAfter(TransportVersions.EQL_ALLOW_PARTIAL_SEARCH_RESULTS)) { + out.writeOptionalBoolean(allowPartialSearchResults); + out.writeOptionalBoolean(allowPartialSequenceResults); + } } @Override @@ -496,7 +535,9 @@ public boolean equals(Object o) { && Objects.equals(resultPosition, that.resultPosition) && Objects.equals(fetchFields, that.fetchFields) && Objects.equals(runtimeMappings, that.runtimeMappings) - && Objects.equals(maxSamplesPerKey, that.maxSamplesPerKey); + && Objects.equals(maxSamplesPerKey, that.maxSamplesPerKey) + && Objects.equals(allowPartialSearchResults, that.allowPartialSearchResults) + && Objects.equals(allowPartialSequenceResults, that.allowPartialSequenceResults); } @Override @@ -517,7 +558,9 @@ public int hashCode() { resultPosition, fetchFields, runtimeMappings, - maxSamplesPerKey + maxSamplesPerKey, + allowPartialSearchResults, + allowPartialSequenceResults ); } diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/action/EqlSearchResponse.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/action/EqlSearchResponse.java index 
2b7b8b074fa71..a4d93b7659970 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/action/EqlSearchResponse.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/action/EqlSearchResponse.java @@ -7,8 +7,11 @@ package org.elasticsearch.xpack.eql.action; import org.apache.lucene.search.TotalHits; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ShardOperationFailedException; +import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; @@ -17,6 +20,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.lucene.Lucene; +import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParserUtils; import org.elasticsearch.core.Nullable; @@ -36,6 +40,7 @@ import java.io.IOException; import java.nio.charset.StandardCharsets; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.List; @@ -54,6 +59,7 @@ public class EqlSearchResponse extends ActionResponse implements ToXContentObjec private final String asyncExecutionId; private final boolean isRunning; private final boolean isPartial; + private final ShardSearchFailure[] shardFailures; private static final class Fields { static final String TOOK = "took"; @@ -62,6 +68,7 @@ private static final class Fields { static final String ID = "id"; static final String IS_RUNNING = "is_running"; static final String IS_PARTIAL = "is_partial"; + static final String SHARD_FAILURES = "shard_failures"; } private static final ParseField TOOK = new ParseField(Fields.TOOK); @@ -70,8 +77,10 @@ private static final class Fields { private static final ParseField ID = new ParseField(Fields.ID); private static final ParseField IS_RUNNING = new ParseField(Fields.IS_RUNNING); private static final ParseField IS_PARTIAL = new ParseField(Fields.IS_PARTIAL); + private static final ParseField SHARD_FAILURES = new ParseField(Fields.SHARD_FAILURES); private static final InstantiatingObjectParser PARSER; + static { InstantiatingObjectParser.Builder parser = InstantiatingObjectParser.builder( "eql/search_response", @@ -84,11 +93,12 @@ private static final class Fields { parser.declareString(optionalConstructorArg(), ID); parser.declareBoolean(constructorArg(), IS_RUNNING); parser.declareBoolean(constructorArg(), IS_PARTIAL); + parser.declareObjectArray(optionalConstructorArg(), (p, c) -> ShardSearchFailure.EMPTY_ARRAY, SHARD_FAILURES); PARSER = parser.build(); } - public EqlSearchResponse(Hits hits, long tookInMillis, boolean isTimeout) { - this(hits, tookInMillis, isTimeout, null, false, false); + public EqlSearchResponse(Hits hits, long tookInMillis, boolean isTimeout, ShardSearchFailure[] shardFailures) { + this(hits, tookInMillis, isTimeout, null, false, false, shardFailures); } public EqlSearchResponse( @@ -97,7 +107,8 @@ public EqlSearchResponse( boolean isTimeout, String asyncExecutionId, boolean isRunning, - boolean isPartial + boolean isPartial, + ShardSearchFailure[] shardFailures ) { super(); this.hits = hits == null ? 
Hits.EMPTY : hits; @@ -106,6 +117,7 @@ public EqlSearchResponse( this.asyncExecutionId = asyncExecutionId; this.isRunning = isRunning; this.isPartial = isPartial; + this.shardFailures = shardFailures; } public EqlSearchResponse(StreamInput in) throws IOException { @@ -116,6 +128,11 @@ public EqlSearchResponse(StreamInput in) throws IOException { asyncExecutionId = in.readOptionalString(); isPartial = in.readBoolean(); isRunning = in.readBoolean(); + if (in.getTransportVersion().onOrAfter(TransportVersions.EQL_ALLOW_PARTIAL_SEARCH_RESULTS)) { + shardFailures = in.readArray(ShardSearchFailure::readShardSearchFailure, ShardSearchFailure[]::new); + } else { + shardFailures = ShardSearchFailure.EMPTY_ARRAY; + } } public static EqlSearchResponse fromXContent(XContentParser parser) { @@ -130,6 +147,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(asyncExecutionId); out.writeBoolean(isPartial); out.writeBoolean(isRunning); + if (out.getTransportVersion().onOrAfter(TransportVersions.EQL_ALLOW_PARTIAL_SEARCH_RESULTS)) { + out.writeArray(shardFailures); + } } @Override @@ -147,6 +167,13 @@ private XContentBuilder innerToXContent(XContentBuilder builder, Params params) builder.field(IS_RUNNING.getPreferredName(), isRunning); builder.field(TOOK.getPreferredName(), tookInMillis); builder.field(TIMED_OUT.getPreferredName(), isTimeout); + if (CollectionUtils.isEmpty(shardFailures) == false) { + builder.startArray(SHARD_FAILURES.getPreferredName()); + for (ShardOperationFailedException shardFailure : ExceptionsHelper.groupBy(shardFailures)) { + shardFailure.toXContent(builder, params); + } + builder.endArray(); + } hits.toXContent(builder, params); return builder; } @@ -178,6 +205,10 @@ public boolean isPartial() { return isPartial; } + public ShardSearchFailure[] shardFailures() { + return shardFailures; + } + @Override public boolean equals(Object o) { if (this == o) { @@ -190,12 +221,13 @@ public boolean equals(Object o) { return Objects.equals(hits, that.hits) && Objects.equals(tookInMillis, that.tookInMillis) && Objects.equals(isTimeout, that.isTimeout) - && Objects.equals(asyncExecutionId, that.asyncExecutionId); + && Objects.equals(asyncExecutionId, that.asyncExecutionId) + && Arrays.equals(shardFailures, that.shardFailures); } @Override public int hashCode() { - return Objects.hash(hits, tookInMillis, isTimeout, asyncExecutionId); + return Objects.hash(hits, tookInMillis, isTimeout, asyncExecutionId, Arrays.hashCode(shardFailures)); } @Override diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/action/EqlSearchTask.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/action/EqlSearchTask.java index 2a1bc3b7adb67..0fc8e8c88d7d9 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/action/EqlSearchTask.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/action/EqlSearchTask.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.eql.action; +import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.core.TimeValue; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.xpack.core.async.AsyncExecutionId; @@ -39,7 +40,8 @@ public EqlSearchResponse getCurrentResult() { false, getExecutionId().getEncoded(), true, - true + true, + ShardSearchFailure.EMPTY_ARRAY ); } } diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/assembler/ExecutionManager.java 
b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/assembler/ExecutionManager.java index b26c815c1a2b5..672d6b87a8dbb 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/assembler/ExecutionManager.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/assembler/ExecutionManager.java @@ -167,7 +167,9 @@ public Executable assemble( criteria.subList(0, completionStage), criteria.get(completionStage), matcher, - listOfKeys + listOfKeys, + cfg.allowPartialSearchResults(), + cfg.allowPartialSequenceResults() ); return w; @@ -235,7 +237,8 @@ public Executable assemble(List> listOfKeys, List cfg.fetchSize(), limit, session.circuitBreaker(), - cfg.maxSamplesPerKey() + cfg.maxSamplesPerKey(), + cfg.allowPartialSearchResults() ); } diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/payload/AbstractPayload.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/payload/AbstractPayload.java index 823cd04d25f45..9fecf958b9714 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/payload/AbstractPayload.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/payload/AbstractPayload.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.eql.execution.payload; +import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.core.TimeValue; import org.elasticsearch.xpack.eql.session.Payload; @@ -14,10 +15,12 @@ public abstract class AbstractPayload implements Payload { private final boolean timedOut; private final TimeValue timeTook; + private ShardSearchFailure[] shardFailures; - protected AbstractPayload(boolean timedOut, TimeValue timeTook) { + protected AbstractPayload(boolean timedOut, TimeValue timeTook, ShardSearchFailure[] shardFailures) { this.timedOut = timedOut; this.timeTook = timeTook; + this.shardFailures = shardFailures; } @Override @@ -29,4 +32,9 @@ public boolean timedOut() { public TimeValue timeTook() { return timeTook; } + + @Override + public ShardSearchFailure[] shardFailures() { + return shardFailures; + } } diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/payload/EventPayload.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/payload/EventPayload.java index a7845ca62dccc..6471bc0814f70 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/payload/EventPayload.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/payload/EventPayload.java @@ -20,7 +20,7 @@ public class EventPayload extends AbstractPayload { private final List values; public EventPayload(SearchResponse response) { - super(response.isTimedOut(), response.getTook()); + super(response.isTimedOut(), response.getTook(), response.getShardFailures()); SearchHits hits = response.getHits(); values = new ArrayList<>(hits.getHits().length); diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sample/SampleIterator.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sample/SampleIterator.java index 89f1c4d1eb041..b9b7cfd6b615a 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sample/SampleIterator.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sample/SampleIterator.java @@ -14,6 +14,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.search.MultiSearchResponse; 
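// Editor's note — a hedged sketch, not part of the patch: the SampleIterator changes below
// funnel shard-level failures from each composite-agg page and the final multi-search into a
// map, so the same failed shard reported by several pages collapses to a single entry before
// being attached to the payload. Assuming the PR's SearchHitUtils.addShardFailures helper is
// shaped roughly like this:
//
//     static void addShardFailures(Map<String, ShardSearchFailure> shardFailures, SearchResponse r) {
//         if (r != null) {
//             for (ShardSearchFailure f : r.getShardFailures()) {
//                 shardFailures.put(f.toString(), f); // map key serves only for de-duplication
//             }
//         }
//     }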
import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.core.TimeValue; import org.elasticsearch.search.SearchHit; @@ -35,6 +36,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; +import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; @@ -44,6 +46,7 @@ import static org.elasticsearch.common.Strings.EMPTY_ARRAY; import static org.elasticsearch.xpack.eql.execution.assembler.SampleQueryRequest.COMPOSITE_AGG_NAME; import static org.elasticsearch.xpack.eql.execution.search.RuntimeUtils.prepareRequest; +import static org.elasticsearch.xpack.eql.util.SearchHitUtils.addShardFailures; public class SampleIterator implements Executable { @@ -58,6 +61,7 @@ public class SampleIterator implements Executable { private final Limit limit; private final int maxSamplesPerKey; private long startTime; + private Map shardFailures = new HashMap<>(); // ---------- CIRCUIT BREAKER ----------- @@ -84,13 +88,16 @@ public class SampleIterator implements Executable { */ private long previousTotalPageSize = 0; + private boolean allowPartialSearchResults; + public SampleIterator( QueryClient client, List criteria, int fetchSize, Limit limit, CircuitBreaker circuitBreaker, - int maxSamplesPerKey + int maxSamplesPerKey, + boolean allowPartialSearchResults ) { this.client = client; this.criteria = criteria; @@ -100,6 +107,7 @@ public SampleIterator( this.limit = limit; this.circuitBreaker = circuitBreaker; this.maxSamplesPerKey = maxSamplesPerKey; + this.allowPartialSearchResults = allowPartialSearchResults; } @Override @@ -147,6 +155,7 @@ private void advance(ActionListener listener) { private void queryForCompositeAggPage(ActionListener listener, final SampleQueryRequest request) { client.query(request, listener.delegateFailureAndWrap((delegate, r) -> { + addShardFailures(shardFailures, r); // either the fields values or the fields themselves are missing // or the filter applied on the eql query matches no documents if (r.hasAggregations() == false) { @@ -209,13 +218,16 @@ private void finalStep(ActionListener listener) { for (SampleCriterion criterion : criteria) { SampleQueryRequest r = criterion.finalQuery(); r.singleKeyPair(compositeKeyValues, maxCriteria, maxSamplesPerKey); - searches.add(prepareRequest(r.searchSource(), false, EMPTY_ARRAY)); + searches.add(prepareRequest(r.searchSource(), false, allowPartialSearchResults, EMPTY_ARRAY)); } sampleKeys.add(new SequenceKey(compositeKeyValues.toArray())); } int initialSize = samples.size(); client.multiQuery(searches, listener.delegateFailureAndWrap((delegate, r) -> { + for (MultiSearchResponse.Item item : r) { + addShardFailures(shardFailures, item.getResponse()); + } List> sample = new ArrayList<>(maxCriteria); MultiSearchResponse.Item[] response = r.getResponses(); int docGroupsCounter = 1; @@ -280,14 +292,23 @@ private void payload(ActionListener listener) { log.trace("Sending payload for [{}] samples", samples.size()); if (samples.isEmpty()) { - listener.onResponse(new EmptyPayload(Type.SAMPLE, timeTook())); + listener.onResponse(new EmptyPayload(Type.SAMPLE, timeTook(), shardFailures.values().toArray(new ShardSearchFailure[0]))); return; } // get results through search (to keep using PIT) client.fetchHits( hits(samples), - ActionListeners.map(listener, listOfHits -> new SamplePayload(samples, listOfHits, false, timeTook())) + ActionListeners.map( + 
listener, + listOfHits -> new SamplePayload( + samples, + listOfHits, + false, + timeTook(), + shardFailures.values().toArray(new ShardSearchFailure[0]) + ) + ) ); } diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sample/SamplePayload.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sample/SamplePayload.java index 121f4c208273b..aee084dd88734 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sample/SamplePayload.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sample/SamplePayload.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.eql.execution.sample; +import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.core.TimeValue; import org.elasticsearch.search.SearchHit; import org.elasticsearch.xpack.eql.action.EqlSearchResponse.Event; @@ -19,8 +20,14 @@ class SamplePayload extends AbstractPayload { private final List values; - SamplePayload(List samples, List> docs, boolean timedOut, TimeValue timeTook) { - super(timedOut, timeTook); + SamplePayload( + List samples, + List> docs, + boolean timedOut, + TimeValue timeTook, + ShardSearchFailure[] shardFailures + ) { + super(timedOut, timeTook, shardFailures); values = new ArrayList<>(samples.size()); for (int i = 0; i < samples.size(); i++) { diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/search/BasicQueryClient.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/search/BasicQueryClient.java index 6cbe5298b5950..18623c17dcffb 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/search/BasicQueryClient.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/search/BasicQueryClient.java @@ -46,12 +46,14 @@ public class BasicQueryClient implements QueryClient { final Client client; final String[] indices; final List fetchFields; + private final boolean allowPartialSearchResults; public BasicQueryClient(EqlSession eqlSession) { this.cfg = eqlSession.configuration(); this.client = eqlSession.client(); this.indices = cfg.indices(); this.fetchFields = cfg.fetchFields(); + this.allowPartialSearchResults = cfg.allowPartialSearchResults(); } @Override @@ -60,11 +62,11 @@ public void query(QueryRequest request, ActionListener listener) // set query timeout searchSource.timeout(cfg.requestTimeout()); - SearchRequest search = prepareRequest(searchSource, false, indices); - search(search, searchLogListener(listener, log)); + SearchRequest search = prepareRequest(searchSource, false, allowPartialSearchResults, indices); + search(search, allowPartialSearchResults, searchLogListener(listener, log, allowPartialSearchResults)); } - protected void search(SearchRequest search, ActionListener listener) { + protected void search(SearchRequest search, boolean allowPartialSearchResults, ActionListener listener) { if (cfg.isCancelled()) { listener.onFailure(new TaskCancelledException("cancelled")); return; @@ -77,7 +79,7 @@ protected void search(SearchRequest search, ActionListener liste client.search(search, listener); } - protected void search(MultiSearchRequest search, ActionListener listener) { + protected void search(MultiSearchRequest search, boolean allowPartialSearchResults, ActionListener listener) { if (cfg.isCancelled()) { listener.onFailure(new TaskCancelledException("cancelled")); return; @@ -91,7 +93,7 @@ protected void search(MultiSearchRequest search, ActionListener> refs, 
ActionListener { + search(multiSearchBuilder.request(), allowPartialSearchResults, listener.delegateFailureAndWrap((delegate, r) -> { for (MultiSearchResponse.Item item : r.getResponses()) { // check for failures if (item.isFailure()) { @@ -187,6 +189,6 @@ public void multiQuery(List searches, ActionListener listener) { + protected void search(SearchRequest search, boolean allowPartialSearchResults, ActionListener listener) { // no pitId, ask for one if (pitId == null) { - openPIT(listener, () -> searchWithPIT(search, listener)); + openPIT(listener, () -> searchWithPIT(search, listener, allowPartialSearchResults), allowPartialSearchResults); } else { - searchWithPIT(search, listener); + searchWithPIT(search, listener, allowPartialSearchResults); } } - private void searchWithPIT(SearchRequest request, ActionListener listener) { + private void searchWithPIT(SearchRequest request, ActionListener listener, boolean allowPartialSearchResults) { makeRequestPITCompatible(request); // get the pid on each response - super.search(request, pitListener(SearchResponse::pointInTimeId, listener)); + super.search(request, allowPartialSearchResults, pitListener(SearchResponse::pointInTimeId, listener)); } @Override - protected void search(MultiSearchRequest search, ActionListener listener) { + protected void search(MultiSearchRequest search, boolean allowPartialSearchResults, ActionListener listener) { // no pitId, ask for one if (pitId == null) { - openPIT(listener, () -> searchWithPIT(search, listener)); + openPIT(listener, () -> searchWithPIT(search, allowPartialSearchResults, listener), allowPartialSearchResults); } else { - searchWithPIT(search, listener); + searchWithPIT(search, allowPartialSearchResults, listener); } } - private void searchWithPIT(MultiSearchRequest search, ActionListener listener) { + private void searchWithPIT(MultiSearchRequest search, boolean allowPartialSearchResults, ActionListener listener) { for (SearchRequest request : search.requests()) { makeRequestPITCompatible(request); } // get the pid on each request - super.search(search, pitListener(r -> { + super.search(search, allowPartialSearchResults, pitListener(r -> { // get pid for (MultiSearchResponse.Item item : r.getResponses()) { // pick the first non-failing response @@ -135,9 +135,10 @@ private ActionListener pitListener( ); } - private void openPIT(ActionListener listener, Runnable runnable) { + private void openPIT(ActionListener listener, Runnable runnable, boolean allowPartialSearchResults) { OpenPointInTimeRequest request = new OpenPointInTimeRequest(indices).indicesOptions(IndexResolver.FIELD_CAPS_INDICES_OPTIONS) - .keepAlive(keepAlive); + .keepAlive(keepAlive) + .allowPartialSearchResults(allowPartialSearchResults); request.indexFilter(filter); client.execute(TransportOpenPointInTimeAction.TYPE, request, listener.delegateFailureAndWrap((l, r) -> { pitId = r.getPointInTimeId(); diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/search/RuntimeUtils.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/search/RuntimeUtils.java index 40f7f7139efa1..92af8c562f840 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/search/RuntimeUtils.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/search/RuntimeUtils.java @@ -56,10 +56,14 @@ public final class RuntimeUtils { private RuntimeUtils() {} - public static ActionListener searchLogListener(ActionListener listener, Logger log) { + public static ActionListener 
searchLogListener( + ActionListener listener, + Logger log, + boolean allowPartialResults + ) { return listener.delegateFailureAndWrap((delegate, response) -> { ShardSearchFailure[] failures = response.getShardFailures(); - if (CollectionUtils.isEmpty(failures) == false) { + if (CollectionUtils.isEmpty(failures) == false && allowPartialResults == false) { delegate.onFailure(new EqlIllegalArgumentException(failures[0].reason(), failures[0].getCause())); return; } @@ -70,16 +74,22 @@ public static ActionListener searchLogListener(ActionListener multiSearchLogListener(ActionListener listener, Logger log) { + public static ActionListener multiSearchLogListener( + ActionListener listener, + boolean allowPartialSearchResults, + Logger log + ) { return listener.delegateFailureAndWrap((delegate, items) -> { for (MultiSearchResponse.Item item : items) { Exception failure = item.getFailure(); SearchResponse response = item.getResponse(); if (failure == null) { - ShardSearchFailure[] failures = response.getShardFailures(); - if (CollectionUtils.isEmpty(failures) == false) { - failure = new EqlIllegalArgumentException(failures[0].reason(), failures[0].getCause()); + if (allowPartialSearchResults == false) { + ShardSearchFailure[] failures = response.getShardFailures(); + if (CollectionUtils.isEmpty(failures) == false) { + failure = new EqlIllegalArgumentException(failures[0].reason(), failures[0].getCause()); + } } } if (failure != null) { @@ -170,11 +180,16 @@ public static HitExtractor createExtractor(FieldExtraction ref, EqlConfiguration throw new EqlIllegalArgumentException("Unexpected value reference {}", ref.getClass()); } - public static SearchRequest prepareRequest(SearchSourceBuilder source, boolean includeFrozen, String... indices) { + public static SearchRequest prepareRequest( + SearchSourceBuilder source, + boolean includeFrozen, + boolean allowPartialSearchResults, + String... indices + ) { SearchRequest searchRequest = new SearchRequest(); searchRequest.indices(indices); searchRequest.source(source); - searchRequest.allowPartialSearchResults(false); + searchRequest.allowPartialSearchResults(allowPartialSearchResults); searchRequest.indicesOptions( includeFrozen ? 
IndexResolver.FIELD_CAPS_FROZEN_INDICES_OPTIONS : IndexResolver.FIELD_CAPS_INDICES_OPTIONS ); diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sequence/SequencePayload.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sequence/SequencePayload.java index 45083babddbb4..b4a8edc79b3ad 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sequence/SequencePayload.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sequence/SequencePayload.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.eql.execution.sequence; +import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.core.TimeValue; import org.elasticsearch.search.SearchHit; import org.elasticsearch.xpack.eql.action.EqlSearchResponse.Event; @@ -19,8 +20,14 @@ class SequencePayload extends AbstractPayload { private final List values; - SequencePayload(List sequences, List> docs, boolean timedOut, TimeValue timeTook) { - super(timedOut, timeTook); + SequencePayload( + List sequences, + List> docs, + boolean timedOut, + TimeValue timeTook, + ShardSearchFailure[] shardFailures + ) { + super(timedOut, timeTook, shardFailures); values = new ArrayList<>(sequences.size()); for (int i = 0; i < sequences.size(); i++) { diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sequence/TumblingWindow.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sequence/TumblingWindow.java index eabf6df518ad4..fac8788db0f95 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sequence/TumblingWindow.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sequence/TumblingWindow.java @@ -13,6 +13,7 @@ import org.elasticsearch.action.search.MultiSearchResponse; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.common.Strings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; @@ -41,6 +42,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.HashMap; import java.util.Iterator; import java.util.LinkedHashMap; import java.util.List; @@ -51,6 +53,7 @@ import static org.elasticsearch.action.ActionListener.runAfter; import static org.elasticsearch.xpack.eql.execution.ExecutionUtils.copySource; import static org.elasticsearch.xpack.eql.execution.search.RuntimeUtils.combineFilters; +import static org.elasticsearch.xpack.eql.util.SearchHitUtils.addShardFailures; import static org.elasticsearch.xpack.eql.util.SearchHitUtils.qualifiedIndex; /** @@ -103,6 +106,9 @@ protected boolean removeEldestEntry(Map.Entry eldest) { private final boolean hasKeys; private final List> listOfKeys; + private final boolean allowPartialSearchResults; + private final boolean allowPartialSequenceResults; + private Map shardFailures = new HashMap<>(); // flag used for DESC sequences to indicate whether // the window needs to restart (since the DESC query still has results) @@ -127,7 +133,10 @@ public TumblingWindow( List criteria, SequenceCriterion until, SequenceMatcher matcher, - List> listOfKeys + List> listOfKeys, + boolean allowPartialSearchResults, + boolean allowPartialSequenceResults + ) { this.client = client; @@ -141,6 +150,8 @@ public TumblingWindow( this.hasKeys = baseRequest.keySize() > 0; 
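// Hedged note, inferred from how the two flags are used in this class: allowPartialSearchResults
// lets the underlying shard-level searches succeed with shards missing (their failures accumulate
// in shardFailures instead of aborting the request), while allowPartialSequenceResults additionally
// permits sequence matches assembled from that incomplete data to be returned; when it is false,
// doPayload() answers with an EmptyPayload as soon as any shard failure has been recorded.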
this.restartWindowFromTailQuery = baseRequest.descending(); + this.listOfKeys = listOfKeys; + this.allowPartialSearchResults = allowPartialSearchResults; + this.allowPartialSequenceResults = allowPartialSequenceResults; + } @Override @@ -158,6 +169,11 @@ public void execute(ActionListener listener) { * Move the window while preserving the same base. */ private void tumbleWindow(int currentStage, ActionListener listener) { + if (allowPartialSequenceResults == false && shardFailures.isEmpty() == false) { + // a shard failure was recorded and partial sequence results are not allowed: + // answer the listener once and stop, instead of falling through and responding twice + doPayload(listener); + return; + } if (currentStage > matcher.firstPositiveStage && matcher.hasCandidates() == false) { if (restartWindowFromTailQuery) { currentStage = matcher.firstPositiveStage; @@ -224,6 +240,9 @@ public void checkMissingEvents(Runnable next, ActionListener listener) private void doCheckMissingEvents(List batchToCheck, MultiSearchResponse p, ActionListener listener, Runnable next) { MultiSearchResponse.Item[] responses = p.getResponses(); + for (MultiSearchResponse.Item response : responses) { + addShardFailures(shardFailures, response.getResponse()); + } int nextResponse = 0; for (Sequence sequence : batchToCheck) { boolean leading = true; @@ -316,7 +335,14 @@ private List prepareQueryForMissingEvents(List toCheck) } addKeyFilter(i, sequence, builder); RuntimeUtils.combineFilters(builder, range); - result.add(RuntimeUtils.prepareRequest(builder.size(1).trackTotalHits(false), false, Strings.EMPTY_ARRAY)); + result.add( + RuntimeUtils.prepareRequest( + builder.size(1).trackTotalHits(false), + false, + allowPartialSearchResults, + Strings.EMPTY_ARRAY + ) + ); } else { leading = false; } @@ -361,6 +387,7 @@ private void advance(int stage, ActionListener listener) { * Execute the base query. */ private void baseCriterion(int baseStage, SearchResponse r, ActionListener listener) { + addShardFailures(shardFailures, r); SequenceCriterion base = criteria.get(baseStage); SearchHits hits = r.getHits(); @@ -731,8 +758,10 @@ private void doPayload(ActionListener listener) { log.trace("Sending payload for [{}] sequences", completed.size()); - if (completed.isEmpty()) { - listener.onResponse(new EmptyPayload(Type.SEQUENCE, timeTook())); + if (completed.isEmpty() || (allowPartialSequenceResults == false && shardFailures.isEmpty() == false)) { + listener.onResponse( + new EmptyPayload(Type.SEQUENCE, timeTook(), shardFailures.values().toArray(new ShardSearchFailure[shardFailures.size()])) + ); return; } @@ -741,7 +770,13 @@ private void doPayload(ActionListener listener) { if (criteria.get(matcher.firstPositiveStage).descending()) { Collections.reverse(completed); } - return new SequencePayload(completed, addMissingEventPlaceholders(listOfHits), false, timeTook()); + return new SequencePayload( + completed, + addMissingEventPlaceholders(listOfHits), + false, + timeTook(), + shardFailures.values().toArray(new ShardSearchFailure[0]) + ); })); } diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/plugin/EqlPlugin.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/plugin/EqlPlugin.java index 084a5e74a47e8..210f88c991539 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/plugin/EqlPlugin.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/plugin/EqlPlugin.java @@ -60,6 +60,20 @@ public class EqlPlugin extends Plugin implements ActionPlugin, CircuitBreakerPlu Setting.Property.DeprecatedWarning ); + public static final Setting DEFAULT_ALLOW_PARTIAL_SEARCH_RESULTS = Setting.boolSetting( + "xpack.eql.default_allow_partial_results", +
false, + Setting.Property.NodeScope, + Setting.Property.Dynamic + ); + + public static final Setting<Boolean> DEFAULT_ALLOW_PARTIAL_SEQUENCE_RESULTS = Setting.boolSetting( + "xpack.eql.default_allow_partial_sequence_results", + false, + Setting.Property.NodeScope, + Setting.Property.Dynamic + ); + public EqlPlugin() {} @Override @@ -86,7 +100,7 @@ private Collection<Object> createComponents(Client client, Settings settings, Cl */ @Override public List<Setting<?>> getSettings() { - return List.of(EQL_ENABLED_SETTING); + return List.of(EQL_ENABLED_SETTING, DEFAULT_ALLOW_PARTIAL_SEARCH_RESULTS, DEFAULT_ALLOW_PARTIAL_SEQUENCE_RESULTS); } @Override diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/plugin/RestEqlSearchAction.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/plugin/RestEqlSearchAction.java index e24a4749f45cd..65def24563e5e 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/plugin/RestEqlSearchAction.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/plugin/RestEqlSearchAction.java @@ -64,6 +64,12 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli } eqlRequest.keepOnCompletion(request.paramAsBoolean("keep_on_completion", eqlRequest.keepOnCompletion())); eqlRequest.ccsMinimizeRoundtrips(request.paramAsBoolean("ccs_minimize_roundtrips", eqlRequest.ccsMinimizeRoundtrips())); + eqlRequest.allowPartialSearchResults( + request.paramAsBoolean("allow_partial_search_results", eqlRequest.allowPartialSearchResults()) + ); + eqlRequest.allowPartialSequenceResults( + request.paramAsBoolean("allow_partial_sequence_results", eqlRequest.allowPartialSequenceResults()) + ); } return channel -> { diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/plugin/TransportEqlSearchAction.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/plugin/TransportEqlSearchAction.java index c0141da2432ce..582352722fc58 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/plugin/TransportEqlSearchAction.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/plugin/TransportEqlSearchAction.java @@ -10,6 +10,7 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListenerResponseHandler; +import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.client.internal.Client; @@ -144,7 +145,8 @@ public EqlSearchResponse initialResponse(EqlSearchTask task) { false, task.getExecutionId().getEncoded(), true, - true + true, + ShardSearchFailure.EMPTY_ARRAY ); } @@ -231,6 +233,12 @@ public static void operation( request.indicesOptions(), request.fetchSize(), request.maxSamplesPerKey(), + request.allowPartialSearchResults() == null + ? defaultAllowPartialSearchResults(clusterService) + : request.allowPartialSearchResults(), + request.allowPartialSequenceResults() == null + ? 
defaultAllowPartialSequenceResults(clusterService) + : request.allowPartialSequenceResults(), clientId, new TaskId(nodeId, task.getId()), task @@ -244,12 +252,34 @@ public static void operation( } } + private static boolean defaultAllowPartialSearchResults(ClusterService clusterService) { + if (clusterService.getClusterSettings() == null) { + return EqlPlugin.DEFAULT_ALLOW_PARTIAL_SEARCH_RESULTS.getDefault(Settings.EMPTY); + } + return clusterService.getClusterSettings().get(EqlPlugin.DEFAULT_ALLOW_PARTIAL_SEARCH_RESULTS); + } + + private static boolean defaultAllowPartialSequenceResults(ClusterService clusterService) { + if (clusterService.getClusterSettings() == null) { + return EqlPlugin.DEFAULT_ALLOW_PARTIAL_SEQUENCE_RESULTS.getDefault(Settings.EMPTY); + } + return clusterService.getClusterSettings().get(EqlPlugin.DEFAULT_ALLOW_PARTIAL_SEQUENCE_RESULTS); + } + static EqlSearchResponse createResponse(Results results, AsyncExecutionId id) { EqlSearchResponse.Hits hits = new EqlSearchResponse.Hits(results.events(), results.sequences(), results.totalHits()); if (id != null) { - return new EqlSearchResponse(hits, results.tookTime().getMillis(), results.timedOut(), id.getEncoded(), false, false); + return new EqlSearchResponse( + hits, + results.tookTime().getMillis(), + results.timedOut(), + id.getEncoded(), + false, + false, + results.shardFailures() + ); } else { - return new EqlSearchResponse(hits, results.tookTime().getMillis(), results.timedOut()); + return new EqlSearchResponse(hits, results.tookTime().getMillis(), results.timedOut(), results.shardFailures()); } } diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/session/EmptyPayload.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/session/EmptyPayload.java index 9822285465087..33ed5799cd073 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/session/EmptyPayload.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/session/EmptyPayload.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.eql.session; +import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.core.TimeValue; import java.util.List; @@ -17,14 +18,16 @@ public class EmptyPayload implements Payload { private final Type type; private final TimeValue timeTook; + private final ShardSearchFailure[] shardFailures; public EmptyPayload(Type type) { - this(type, TimeValue.ZERO); + this(type, TimeValue.ZERO, ShardSearchFailure.EMPTY_ARRAY); } - public EmptyPayload(Type type, TimeValue timeTook) { + public EmptyPayload(Type type, TimeValue timeTook, ShardSearchFailure[] shardFailures) { this.type = type; this.timeTook = timeTook; + this.shardFailures = shardFailures; } @Override @@ -46,4 +49,10 @@ public TimeValue timeTook() { public List values() { return emptyList(); } + + @Override + public ShardSearchFailure[] shardFailures() { + return shardFailures; + } + } diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/session/EqlConfiguration.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/session/EqlConfiguration.java index 8dd8220fb63bc..8242b0b533ad3 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/session/EqlConfiguration.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/session/EqlConfiguration.java @@ -30,6 +30,8 @@ public class EqlConfiguration extends org.elasticsearch.xpack.ql.session.Configu private final EqlSearchTask task; private final int fetchSize; private final int maxSamplesPerKey; 
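The two flags threaded through here are deliberately independent: allow_partial_search_results lets each underlying search tolerate shard-level failures, while allow_partial_sequence_results additionally permits sequence results assembled from such partial data to be returned (otherwise a shard failure yields an empty payload that still carries the collected ShardSearchFailures). A minimal usage sketch, assuming the fluent EqlSearchRequest setters exercised in the tests below; the index pattern and query text are illustrative only:

    // Tolerate shard failures for the constituent searches, but fall back to an
    // empty sequence payload (still reporting the failures) if any shard failed.
    EqlSearchRequest request = new EqlSearchRequest().indices("logs-*")
        .query("sequence by host.id [process where true] [network where true]")
        .allowPartialSearchResults(true)
        .allowPartialSequenceResults(false);
    // Leaving either flag unset (null) makes TransportEqlSearchAction fall back to
    // the dynamic cluster settings xpack.eql.default_allow_partial_results and
    // xpack.eql.default_allow_partial_sequence_results registered by EqlPlugin.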
+ private final boolean allowPartialSearchResults; + private final boolean allowPartialSequenceResults; @Nullable private final QueryBuilder filter; @@ -50,6 +52,8 @@ public EqlConfiguration( IndicesOptions indicesOptions, int fetchSize, int maxSamplesPerKey, + boolean allowPartialSearchResults, + boolean allowPartialSequenceResults, String clientId, TaskId taskId, EqlSearchTask task @@ -67,6 +71,8 @@ public EqlConfiguration( this.task = task; this.fetchSize = fetchSize; this.maxSamplesPerKey = maxSamplesPerKey; + this.allowPartialSearchResults = allowPartialSearchResults; + this.allowPartialSequenceResults = allowPartialSequenceResults; } public String[] indices() { @@ -89,6 +95,14 @@ public int maxSamplesPerKey() { return maxSamplesPerKey; } + public boolean allowPartialSearchResults() { + return allowPartialSearchResults; + } + + public boolean allowPartialSequenceResults() { + return allowPartialSequenceResults; + } + public QueryBuilder filter() { return filter; } diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/session/Payload.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/session/Payload.java index 1d82478e6db26..05e614714a5aa 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/session/Payload.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/session/Payload.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.eql.session; +import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.core.TimeValue; import java.util.List; @@ -29,4 +30,6 @@ enum Type { TimeValue timeTook(); List<?> values(); + + ShardSearchFailure[] shardFailures(); } diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/session/Results.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/session/Results.java index bb76c08c801cb..13886470f21f5 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/session/Results.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/session/Results.java @@ -9,6 +9,7 @@ import org.apache.lucene.search.TotalHits; import org.apache.lucene.search.TotalHits.Relation; +import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.core.TimeValue; import org.elasticsearch.xpack.eql.action.EqlSearchResponse.Event; import org.elasticsearch.xpack.eql.action.EqlSearchResponse.Sequence; @@ -23,18 +24,27 @@ public class Results { private final boolean timedOut; private final TimeValue tookTime; private final Type type; + private final ShardSearchFailure[] shardFailures; public static Results fromPayload(Payload payload) { List<?> values = payload.values(); - return new Results(new TotalHits(values.size(), Relation.EQUAL_TO), payload.timeTook(), false, values, payload.resultType()); + return new Results( + new TotalHits(values.size(), Relation.EQUAL_TO), + payload.timeTook(), + false, + values, + payload.resultType(), + payload.shardFailures() + ); } - Results(TotalHits totalHits, TimeValue tookTime, boolean timedOut, List<?> results, Type type) { + Results(TotalHits totalHits, TimeValue tookTime, boolean timedOut, List<?> results, Type type, ShardSearchFailure[] shardFailures) { this.totalHits = totalHits; this.tookTime = tookTime; this.timedOut = timedOut; this.results = results; this.type = type; + this.shardFailures = shardFailures; } public TotalHits totalHits() { @@ -51,6 +62,10 @@ public List<Sequence> sequences() { return (type == Type.SEQUENCE || type == Type.SAMPLE) ? 
(List<Sequence>) results : null; } + public ShardSearchFailure[] shardFailures() { + return shardFailures; + } + public TimeValue tookTime() { return tookTime; } diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/util/SearchHitUtils.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/util/SearchHitUtils.java index 91795ac15b53e..2b5ec9718cfc4 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/util/SearchHitUtils.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/util/SearchHitUtils.java @@ -7,8 +7,12 @@ package org.elasticsearch.xpack.eql.util; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.search.SearchHit; +import java.util.Map; + import static org.elasticsearch.transport.RemoteClusterAware.buildRemoteIndexName; public final class SearchHitUtils { @@ -16,4 +20,12 @@ public final class SearchHitUtils { public static String qualifiedIndex(SearchHit hit) { return buildRemoteIndexName(hit.getClusterAlias(), hit.getIndex()); } + + public static void addShardFailures(Map<String, ShardSearchFailure> shardFailures, SearchResponse r) { + if (r.getShardFailures() != null) { + for (ShardSearchFailure shardFailure : r.getShardFailures()) { + shardFailures.put(shardFailure.toString(), shardFailure); + } + } + } } diff --git a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/EqlTestUtils.java b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/EqlTestUtils.java index a1aa8e4bd98d7..75884fab4dbb3 100644 --- a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/EqlTestUtils.java +++ b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/EqlTestUtils.java @@ -51,6 +51,8 @@ private EqlTestUtils() {} null, 123, 1, + false, + true, "", new TaskId("test", 123), null @@ -69,6 +71,8 @@ public static EqlConfiguration randomConfiguration() { randomIndicesOptions(), randomIntBetween(1, 1000), randomIntBetween(1, 1000), + randomBoolean(), + randomBoolean(), randomAlphaOfLength(16), new TaskId(randomAlphaOfLength(10), randomNonNegativeLong()), randomTask() diff --git a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/action/EqlSearchRequestTests.java b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/action/EqlSearchRequestTests.java index 0ff9fa9131b27..1a06aead910c8 100644 --- a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/action/EqlSearchRequestTests.java +++ b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/action/EqlSearchRequestTests.java @@ -80,6 +80,8 @@ protected EqlSearchRequest createTestInstance() { .waitForCompletionTimeout(randomTimeValue()) .keepAlive(randomTimeValue()) .keepOnCompletion(randomBoolean()) + .allowPartialSearchResults(randomBoolean()) + .allowPartialSequenceResults(randomBoolean()) .fetchFields(randomFetchFields) .runtimeMappings(randomRuntimeMappings()) .resultPosition(randomFrom("tail", "head")) @@ -136,6 +138,12 @@ protected EqlSearchRequest mutateInstanceForVersion(EqlSearchRequest instance, T mutatedInstance.runtimeMappings(version.onOrAfter(TransportVersions.V_7_13_0) ? instance.runtimeMappings() : emptyMap()); mutatedInstance.resultPosition(version.onOrAfter(TransportVersions.V_7_17_8) ? instance.resultPosition() : "tail"); mutatedInstance.maxSamplesPerKey(version.onOrAfter(TransportVersions.V_8_7_0) ? instance.maxSamplesPerKey() : 1); + mutatedInstance.allowPartialSearchResults( version.onOrAfter(TransportVersions.EQL_ALLOW_PARTIAL_SEARCH_RESULTS) ? 
instance.allowPartialSearchResults() : false + ); + mutatedInstance.allowPartialSequenceResults( + version.onOrAfter(TransportVersions.EQL_ALLOW_PARTIAL_SEARCH_RESULTS) ? instance.allowPartialSequenceResults() : false + ); return mutatedInstance; } diff --git a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/action/EqlSearchResponseTests.java b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/action/EqlSearchResponseTests.java index 6cb283d11848e..fa118a5256df1 100644 --- a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/action/EqlSearchResponseTests.java +++ b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/action/EqlSearchResponseTests.java @@ -9,6 +9,7 @@ import org.apache.lucene.search.TotalHits; import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; +import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.document.DocumentField; @@ -190,7 +191,7 @@ public static EqlSearchResponse createRandomEventsResponse(TotalHits totalHits, hits = new EqlSearchResponse.Hits(randomEvents(xType), null, totalHits); } if (randomBoolean()) { - return new EqlSearchResponse(hits, randomIntBetween(0, 1001), randomBoolean()); + return new EqlSearchResponse(hits, randomIntBetween(0, 1001), randomBoolean(), ShardSearchFailure.EMPTY_ARRAY); } else { return new EqlSearchResponse( hits, @@ -198,7 +199,8 @@ public static EqlSearchResponse createRandomEventsResponse(TotalHits totalHits, randomBoolean(), randomAlphaOfLength(10), randomBoolean(), - randomBoolean() + randomBoolean(), + ShardSearchFailure.EMPTY_ARRAY ); } } @@ -222,7 +224,7 @@ public static EqlSearchResponse createRandomSequencesResponse(TotalHits totalHit hits = new EqlSearchResponse.Hits(null, seq, totalHits); } if (randomBoolean()) { - return new EqlSearchResponse(hits, randomIntBetween(0, 1001), randomBoolean()); + return new EqlSearchResponse(hits, randomIntBetween(0, 1001), randomBoolean(), ShardSearchFailure.EMPTY_ARRAY); } else { return new EqlSearchResponse( hits, @@ -230,7 +232,8 @@ public static EqlSearchResponse createRandomSequencesResponse(TotalHits totalHit randomBoolean(), randomAlphaOfLength(10), randomBoolean(), - randomBoolean() + randomBoolean(), + ShardSearchFailure.EMPTY_ARRAY ); } } @@ -273,7 +276,8 @@ protected EqlSearchResponse mutateInstanceForVersion(EqlSearchResponse instance, instance.isTimeout(), instance.id(), instance.isRunning(), - instance.isPartial() + instance.isPartial(), + ShardSearchFailure.EMPTY_ARRAY ); } diff --git a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/action/LocalStateEQLXPackPlugin.java b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/action/LocalStateEQLXPackPlugin.java index 4d5201f544d72..33573b99546fb 100644 --- a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/action/LocalStateEQLXPackPlugin.java +++ b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/action/LocalStateEQLXPackPlugin.java @@ -7,26 +7,41 @@ package org.elasticsearch.xpack.eql.action; +import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.indices.breaker.BreakerSettings; import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.plugins.CircuitBreakerPlugin; import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; import 
org.elasticsearch.xpack.eql.plugin.EqlPlugin; import org.elasticsearch.xpack.ql.plugin.QlPlugin; import java.nio.file.Path; -public class LocalStateEQLXPackPlugin extends LocalStateCompositeXPackPlugin { +public class LocalStateEQLXPackPlugin extends LocalStateCompositeXPackPlugin implements CircuitBreakerPlugin { + + private final EqlPlugin eqlPlugin; public LocalStateEQLXPackPlugin(final Settings settings, final Path configPath) { super(settings, configPath); LocalStateEQLXPackPlugin thisVar = this; - plugins.add(new EqlPlugin() { + this.eqlPlugin = new EqlPlugin() { @Override protected XPackLicenseState getLicenseState() { return thisVar.getLicenseState(); } - }); + }; + plugins.add(eqlPlugin); plugins.add(new QlPlugin()); } + @Override + public BreakerSettings getCircuitBreaker(Settings settings) { + return eqlPlugin.getCircuitBreaker(settings); + } + + @Override + public void setCircuitBreaker(CircuitBreaker circuitBreaker) { + eqlPlugin.setCircuitBreaker(circuitBreaker); + } } diff --git a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/assembler/ImplicitTiebreakerTests.java b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/assembler/ImplicitTiebreakerTests.java index 7bb6a228f6e48..abd928b04a9c7 100644 --- a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/assembler/ImplicitTiebreakerTests.java +++ b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/assembler/ImplicitTiebreakerTests.java @@ -141,7 +141,15 @@ public void testImplicitTiebreakerBeingSet() { booleanArrayOf(stages, false), NOOP_CIRCUIT_BREAKER ); - TumblingWindow window = new TumblingWindow(client, criteria, null, matcher, Collections.emptyList()); + TumblingWindow window = new TumblingWindow( + client, + criteria, + null, + matcher, + Collections.emptyList(), + randomBoolean(), + randomBoolean() + ); window.execute(wrap(p -> {}, ex -> { throw ExceptionsHelper.convertToRuntime(ex); })); } } diff --git a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/assembler/SequenceSpecTests.java b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/assembler/SequenceSpecTests.java index a8ed842e94c44..f6aa851b2fff0 100644 --- a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/assembler/SequenceSpecTests.java +++ b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/assembler/SequenceSpecTests.java @@ -277,7 +277,15 @@ public void test() throws Exception { ); QueryClient testClient = new TestQueryClient(); - TumblingWindow window = new TumblingWindow(testClient, criteria, null, matcher, Collections.emptyList()); + TumblingWindow window = new TumblingWindow( + testClient, + criteria, + null, + matcher, + Collections.emptyList(), + randomBoolean(), + randomBoolean() + ); // finally make the assertion at the end of the listener window.execute(ActionTestUtils.assertNoFailureListener(this::checkResults)); diff --git a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sample/CircuitBreakerTests.java b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sample/CircuitBreakerTests.java index dc132659417ff..80b1ff97b725d 100644 --- a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sample/CircuitBreakerTests.java +++ b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sample/CircuitBreakerTests.java @@ -89,7 +89,7 @@ public void query(QueryRequest r, ActionListener l) {} @Override public void 
fetchHits(Iterable> refs, ActionListener>> listener) {} - }, mockCriteria(), randomIntBetween(10, 500), new Limit(1000, 0), CIRCUIT_BREAKER, 1); + }, mockCriteria(), randomIntBetween(10, 500), new Limit(1000, 0), CIRCUIT_BREAKER, 1, randomBoolean()); CIRCUIT_BREAKER.startBreaking(); iterator.pushToStack(new SampleIterator.Page(CB_STACK_SIZE_PRECISION - 1)); @@ -142,7 +142,8 @@ public void fetchHits(Iterable> refs, ActionListener> refs, ActionListener { // do nothing, we don't care about the query results }, ex -> { fail("Shouldn't have failed"); })); diff --git a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sequence/CircuitBreakerTests.java b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sequence/CircuitBreakerTests.java index fe1fca45364e3..58448d981fcca 100644 --- a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sequence/CircuitBreakerTests.java +++ b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sequence/CircuitBreakerTests.java @@ -146,7 +146,15 @@ public void testCircuitBreakerTumblingWindow() { booleanArrayOf(stages, false), CIRCUIT_BREAKER ); - TumblingWindow window = new TumblingWindow(client, criteria, null, matcher, Collections.emptyList()); + TumblingWindow window = new TumblingWindow( + client, + criteria, + null, + matcher, + Collections.emptyList(), + randomBoolean(), + randomBoolean() + ); window.execute(ActionTestUtils.assertNoFailureListener(p -> {})); CIRCUIT_BREAKER.startBreaking(); @@ -228,7 +236,15 @@ private void assertMemoryCleared( booleanArrayOf(sequenceFiltersCount, false), eqlCircuitBreaker ); - TumblingWindow window = new TumblingWindow(eqlClient, criteria, null, matcher, Collections.emptyList()); + TumblingWindow window = new TumblingWindow( + eqlClient, + criteria, + null, + matcher, + Collections.emptyList(), + randomBoolean(), + randomBoolean() + ); window.execute(ActionListener.noop()); assertTrue(esClient.searchRequestsRemainingCount() == 0); // ensure all the search requests have been asked for @@ -271,7 +287,15 @@ public void testEqlCBCleanedUp_on_ParentCBBreak() { booleanArrayOf(sequenceFiltersCount, false), eqlCircuitBreaker ); - TumblingWindow window = new TumblingWindow(eqlClient, criteria, null, matcher, Collections.emptyList()); + TumblingWindow window = new TumblingWindow( + eqlClient, + criteria, + null, + matcher, + Collections.emptyList(), + randomBoolean(), + randomBoolean() + ); window.execute(wrap(p -> fail(), ex -> assertTrue(ex instanceof CircuitBreakingException))); } assertCriticalWarnings("[indices.breaker.total.limit] setting of [0%] is below the recommended minimum of 50.0% of the heap"); @@ -329,6 +353,8 @@ private QueryClient buildQueryClient(ESMockClient esClient, CircuitBreaker eqlCi null, 123, 1, + randomBoolean(), + randomBoolean(), "", new TaskId("test", 123), new EqlSearchTask( diff --git a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sequence/PITFailureTests.java b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sequence/PITFailureTests.java index 1a2f00463b49b..2eee6a262e73c 100644 --- a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sequence/PITFailureTests.java +++ b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sequence/PITFailureTests.java @@ -83,6 +83,8 @@ public void testHandlingPitFailure() { null, 123, 1, + randomBoolean(), + randomBoolean(), "", new TaskId("test", 123), new EqlSearchTask( @@ -132,7 +134,15 @@ public void 
testHandlingPitFailure() { ); SequenceMatcher matcher = new SequenceMatcher(1, false, TimeValue.MINUS_ONE, null, booleanArrayOf(1, false), cb); - TumblingWindow window = new TumblingWindow(eqlClient, criteria, null, matcher, Collections.emptyList()); + TumblingWindow window = new TumblingWindow( + eqlClient, + criteria, + null, + matcher, + Collections.emptyList(), + randomBoolean(), + randomBoolean() + ); window.execute( wrap( p -> { fail("Search succeeded despite PIT failure"); }, diff --git a/x-pack/plugin/esql/compute/ann/src/main/java/org/elasticsearch/compute/ann/Fixed.java b/x-pack/plugin/esql/compute/ann/src/main/java/org/elasticsearch/compute/ann/Fixed.java index 62703fa400ff7..1f10abf3b9fb0 100644 --- a/x-pack/plugin/esql/compute/ann/src/main/java/org/elasticsearch/compute/ann/Fixed.java +++ b/x-pack/plugin/esql/compute/ann/src/main/java/org/elasticsearch/compute/ann/Fixed.java @@ -11,7 +11,6 @@ import java.lang.annotation.Retention; import java.lang.annotation.RetentionPolicy; import java.lang.annotation.Target; -import java.util.function.Function; /** * Used on parameters on methods annotated with {@link Evaluator} to indicate @@ -27,12 +26,23 @@ boolean includeInToString() default true; /** - * Should the Evaluator's factory build this per evaluator with a - * {@code Function} or just take fixed implementation? - * This is typically set to {@code true} to use the {@link Function} - * to make "scratch" objects which have to be isolated in a single thread. - * This is typically set to {@code false} when the parameter is simply - * immutable and can be shared. + * Defines the scope of the parameter. + * - SINGLETON (default) will build a single instance and share it across all evaluators + * - THREAD_LOCAL will build a new instance for each evaluator thread */ - boolean build() default false; + Scope scope() default Scope.SINGLETON; + + /** + * Defines the parameter scope + */ + enum Scope { + /** + * Should be used for immutable parameters that can be shared across different threads + */ + SINGLETON, + /** + * Should be used for mutable or not thread safe parameters + */ + THREAD_LOCAL, + } } diff --git a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/EvaluatorImplementer.java b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/EvaluatorImplementer.java index 5869eff23a9ab..b4a0cf9127f23 100644 --- a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/EvaluatorImplementer.java +++ b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/EvaluatorImplementer.java @@ -16,6 +16,7 @@ import com.squareup.javapoet.TypeSpec; import org.elasticsearch.compute.ann.Fixed; +import org.elasticsearch.compute.ann.Fixed.Scope; import java.util.ArrayList; import java.util.Arrays; @@ -725,7 +726,7 @@ public String closeInvocation() { } } - private record FixedProcessFunctionArg(TypeName type, String name, boolean includeInToString, boolean build, boolean releasable) + private record FixedProcessFunctionArg(TypeName type, String name, boolean includeInToString, Scope scope, boolean releasable) implements ProcessFunctionArg { @Override @@ -762,12 +763,18 @@ public void implementFactoryCtor(MethodSpec.Builder builder) { } private TypeName factoryFieldType() { - return build ? 
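To make the Fixed.Scope Javadoc above concrete, an illustrative evaluator method follows (a sketch, not code from this change; the process method and its BytesRefBuilder scratch parameter are assumed):

    // A mutable scratch buffer is not thread safe, so request one per driver
    // thread via THREAD_LOCAL; an immutable parameter could keep the SINGLETON
    // default and be shared across all evaluators.
    @Evaluator
    static BytesRef process(BytesRef val, @Fixed(scope = Fixed.Scope.THREAD_LOCAL) BytesRefBuilder scratch) {
        scratch.clear();
        scratch.append(val);
        return scratch.toBytesRef();
    }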
ParameterizedTypeName.get(ClassName.get(Function.class), DRIVER_CONTEXT, type.box()) : type; + return switch (scope) { + case SINGLETON -> type; + case THREAD_LOCAL -> ParameterizedTypeName.get(ClassName.get(Function.class), DRIVER_CONTEXT, type.box()); + }; } @Override public String factoryInvocation(MethodSpec.Builder factoryMethodBuilder) { - return build ? name + ".apply(context)" : name; + return switch (scope) { + case SINGLETON -> name; + case THREAD_LOCAL -> name + ".apply(context)"; + }; } @Override @@ -1020,7 +1027,7 @@ private ProcessFunction( type, name, fixed.includeInToString(), - fixed.build(), + fixed.scope(), Types.extendsSuper(types, v.asType(), "org.elasticsearch.core.Releasable") ) ); diff --git a/x-pack/plugin/esql/qa/server/mixed-cluster/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/mixed/MixedClusterEsqlSpecIT.java b/x-pack/plugin/esql/qa/server/mixed-cluster/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/mixed/MixedClusterEsqlSpecIT.java index 004beaafb4009..d4b087277df52 100644 --- a/x-pack/plugin/esql/qa/server/mixed-cluster/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/mixed/MixedClusterEsqlSpecIT.java +++ b/x-pack/plugin/esql/qa/server/mixed-cluster/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/mixed/MixedClusterEsqlSpecIT.java @@ -21,7 +21,7 @@ import java.util.List; import static org.elasticsearch.xpack.esql.CsvTestUtils.isEnabled; -import static org.elasticsearch.xpack.esql.action.EsqlCapabilities.Cap.JOIN_LOOKUP_V7; +import static org.elasticsearch.xpack.esql.action.EsqlCapabilities.Cap.JOIN_LOOKUP_V8; import static org.elasticsearch.xpack.esql.qa.rest.EsqlSpecTestCase.Mode.ASYNC; public class MixedClusterEsqlSpecIT extends EsqlSpecTestCase { @@ -96,7 +96,7 @@ protected boolean supportsInferenceTestService() { @Override protected boolean supportsIndexModeLookup() throws IOException { - return hasCapabilities(List.of(JOIN_LOOKUP_V7.capabilityName())); + return hasCapabilities(List.of(JOIN_LOOKUP_V8.capabilityName())); } @Override diff --git a/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClusterSpecIT.java b/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClusterSpecIT.java index c75a920e16973..d7c57e23b7147 100644 --- a/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClusterSpecIT.java +++ b/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClusterSpecIT.java @@ -48,7 +48,7 @@ import static org.elasticsearch.xpack.esql.EsqlTestUtils.classpathResources; import static org.elasticsearch.xpack.esql.action.EsqlCapabilities.Cap.INLINESTATS; import static org.elasticsearch.xpack.esql.action.EsqlCapabilities.Cap.INLINESTATS_V2; -import static org.elasticsearch.xpack.esql.action.EsqlCapabilities.Cap.JOIN_LOOKUP_V7; +import static org.elasticsearch.xpack.esql.action.EsqlCapabilities.Cap.JOIN_LOOKUP_V8; import static org.elasticsearch.xpack.esql.action.EsqlCapabilities.Cap.JOIN_PLANNING_V1; import static org.elasticsearch.xpack.esql.action.EsqlCapabilities.Cap.METADATA_FIELDS_REMOTE_TEST; import static org.elasticsearch.xpack.esql.qa.rest.EsqlSpecTestCase.Mode.SYNC; @@ -124,7 +124,7 @@ protected void shouldSkipTest(String testName) throws IOException { assumeFalse("INLINESTATS not yet supported in CCS", testCase.requiredCapabilities.contains(INLINESTATS.capabilityName())); assumeFalse("INLINESTATS not yet supported in 
CCS", testCase.requiredCapabilities.contains(INLINESTATS_V2.capabilityName())); assumeFalse("INLINESTATS not yet supported in CCS", testCase.requiredCapabilities.contains(JOIN_PLANNING_V1.capabilityName())); - assumeFalse("LOOKUP JOIN not yet supported in CCS", testCase.requiredCapabilities.contains(JOIN_LOOKUP_V7.capabilityName())); + assumeFalse("LOOKUP JOIN not yet supported in CCS", testCase.requiredCapabilities.contains(JOIN_LOOKUP_V8.capabilityName())); } private TestFeatureService remoteFeaturesService() throws IOException { @@ -283,8 +283,8 @@ protected boolean supportsInferenceTestService() { @Override protected boolean supportsIndexModeLookup() throws IOException { - // CCS does not yet support JOIN_LOOKUP_V7 and clusters falsely report they have this capability - // return hasCapabilities(List.of(JOIN_LOOKUP_V7.capabilityName())); + // CCS does not yet support JOIN_LOOKUP_V8 and clusters falsely report they have this capability + // return hasCapabilities(List.of(JOIN_LOOKUP_V8.capabilityName())); return false; } } diff --git a/x-pack/plugin/esql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/multi_node/SemanticMatchIT.java b/x-pack/plugin/esql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/multi_node/SemanticMatchIT.java new file mode 100644 index 0000000000000..0ce84330b0b01 --- /dev/null +++ b/x-pack/plugin/esql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/multi_node/SemanticMatchIT.java @@ -0,0 +1,26 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.qa.multi_node; + +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; + +import org.elasticsearch.test.TestClustersThreadFilter; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.xpack.esql.qa.rest.SemanticMatchTestCase; +import org.junit.ClassRule; + +@ThreadLeakFilters(filters = TestClustersThreadFilter.class) +public class SemanticMatchIT extends SemanticMatchTestCase { + @ClassRule + public static ElasticsearchCluster cluster = Clusters.testCluster(spec -> spec.plugin("inference-service-test")); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } +} diff --git a/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/SemanticMatchIT.java b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/SemanticMatchIT.java new file mode 100644 index 0000000000000..8edc2dbcf35a2 --- /dev/null +++ b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/SemanticMatchIT.java @@ -0,0 +1,26 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.qa.single_node; + +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; + +import org.elasticsearch.test.TestClustersThreadFilter; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.xpack.esql.qa.rest.SemanticMatchTestCase; +import org.junit.ClassRule; + +@ThreadLeakFilters(filters = TestClustersThreadFilter.class) +public class SemanticMatchIT extends SemanticMatchTestCase { + @ClassRule + public static ElasticsearchCluster cluster = Clusters.testCluster(spec -> spec.plugin("inference-service-test")); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } +} diff --git a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RequestIndexFilteringTestCase.java b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RequestIndexFilteringTestCase.java index 40027249670f6..355c403ce2a86 100644 --- a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RequestIndexFilteringTestCase.java +++ b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RequestIndexFilteringTestCase.java @@ -221,7 +221,7 @@ public void testIndicesDontExist() throws IOException { assertThat(e.getMessage(), containsString("index_not_found_exception")); assertThat(e.getMessage(), containsString("no such index [foo]")); - if (EsqlCapabilities.Cap.JOIN_LOOKUP_V7.isEnabled()) { + if (EsqlCapabilities.Cap.JOIN_LOOKUP_V8.isEnabled()) { e = expectThrows( ResponseException.class, () -> runEsql(timestampFilter("gte", "2020-01-01").query("FROM test1 | LOOKUP JOIN foo ON id1")) diff --git a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/SemanticMatchTestCase.java b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/SemanticMatchTestCase.java new file mode 100644 index 0000000000000..aafa57e764ae7 --- /dev/null +++ b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/SemanticMatchTestCase.java @@ -0,0 +1,121 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.qa.rest; + +import org.elasticsearch.client.Request; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.xpack.esql.action.EsqlCapabilities; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.util.Map; + +import static org.hamcrest.core.StringContains.containsString; + +public abstract class SemanticMatchTestCase extends ESRestTestCase { + public void testWithMultipleInferenceIds() throws IOException { + String query = """ + from test-semantic1,test-semantic2 + | where match(semantic_text_field, "something") + """; + ResponseException re = expectThrows(ResponseException.class, () -> runEsqlQuery(query)); + + assertThat(re.getMessage(), containsString("Field [semantic_text_field] has multiple inference IDs associated with it")); + + assertEquals(400, re.getResponse().getStatusLine().getStatusCode()); + } + + public void testWithInferenceNotConfigured() { + String query = """ + from test-semantic3 + | where match(semantic_text_field, "something") + """; + ResponseException re = expectThrows(ResponseException.class, () -> runEsqlQuery(query)); + + assertThat(re.getMessage(), containsString("Inference endpoint not found")); + assertEquals(404, re.getResponse().getStatusLine().getStatusCode()); + } + + @Before + public void setUpIndices() throws IOException { + assumeTrue("semantic text capability not available", EsqlCapabilities.Cap.SEMANTIC_TEXT_TYPE.isEnabled()); + + var settings = Settings.builder().build(); + + String mapping1 = """ + "properties": { + "semantic_text_field": { + "type": "semantic_text", + "inference_id": "test_sparse_inference" + } + } + """; + createIndex(adminClient(), "test-semantic1", settings, mapping1); + + String mapping2 = """ + "properties": { + "semantic_text_field": { + "type": "semantic_text", + "inference_id": "test_dense_inference" + } + } + """; + createIndex(adminClient(), "test-semantic2", settings, mapping2); + + String mapping3 = """ + "properties": { + "semantic_text_field": { + "type": "semantic_text", + "inference_id": "inexistent" + } + } + """; + createIndex(adminClient(), "test-semantic3", settings, mapping3); + } + + @Before + public void setUpTextEmbeddingInferenceEndpoint() throws IOException { + assumeTrue("semantic text capability not available", EsqlCapabilities.Cap.SEMANTIC_TEXT_TYPE.isEnabled()); + Request request = new Request("PUT", "_inference/text_embedding/test_dense_inference"); + request.setJsonEntity(""" + { + "service": "test_service", + "service_settings": { + "model": "my_model", + "api_key": "abc64" + }, + "task_settings": { + } + } + """); + adminClient().performRequest(request); + } + + @After + public void wipeData() throws IOException { + assumeTrue("semantic text capability not available", EsqlCapabilities.Cap.SEMANTIC_TEXT_TYPE.isEnabled()); + adminClient().performRequest(new Request("DELETE", "*")); + + try { + adminClient().performRequest(new Request("DELETE", "_inference/test_dense_inference")); + } catch (ResponseException e) { + // 404 here means the endpoint was not created + if (e.getResponse().getStatusLine().getStatusCode() != 404) { + throw e; + } + } + } + + private Map runEsqlQuery(String query) throws IOException { + RestEsqlTestCase.RequestObjectBuilder builder = RestEsqlTestCase.requestObjectBuilder().query(query); + return RestEsqlTestCase.runEsqlSync(builder); + } +} diff --git 
a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestUtils.java b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestUtils.java index 7adafa908ce4f..f0bdf089f69d1 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestUtils.java +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestUtils.java @@ -63,7 +63,7 @@ import static org.elasticsearch.xpack.esql.core.util.SpatialCoordinateTypes.GEO; public final class CsvTestUtils { - private static final int MAX_WIDTH = 20; + private static final int MAX_WIDTH = 80; private static final CsvPreference CSV_SPEC_PREFERENCES = new CsvPreference.Builder('"', '|', "\r\n").build(); private static final String NULL_VALUE = "null"; private static final char ESCAPE_CHAR = '\\'; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java index abfe90f80e372..8e81d14b4dfd7 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java @@ -104,7 +104,7 @@ public class CsvTestsDataLoader { private static final TestsDataset DISTANCES = new TestsDataset("distances"); private static final TestsDataset K8S = new TestsDataset("k8s", "k8s-mappings.json", "k8s.csv").withSetting("k8s-settings.json"); private static final TestsDataset ADDRESSES = new TestsDataset("addresses"); - private static final TestsDataset BOOKS = new TestsDataset("books"); + private static final TestsDataset BOOKS = new TestsDataset("books").withSetting("books-settings.json"); private static final TestsDataset SEMANTIC_TEXT = new TestsDataset("semantic_text").withInferenceEndpoint(true); public static final Map CSV_DATASET_MAP = Map.ofEntries( diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/EsqlTestUtils.java b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/EsqlTestUtils.java index 18ce9d7e3e057..66fd7d3ee5eb5 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/EsqlTestUtils.java +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/EsqlTestUtils.java @@ -70,6 +70,7 @@ import org.elasticsearch.xpack.esql.plugin.EsqlPlugin; import org.elasticsearch.xpack.esql.plugin.QueryPragmas; import org.elasticsearch.xpack.esql.session.Configuration; +import org.elasticsearch.xpack.esql.session.QueryBuilderResolver; import org.elasticsearch.xpack.esql.stats.Metrics; import org.elasticsearch.xpack.esql.stats.SearchStats; import org.elasticsearch.xpack.versionfield.Version; @@ -351,6 +352,8 @@ public String toString() { public static final Verifier TEST_VERIFIER = new Verifier(new Metrics(new EsqlFunctionRegistry()), new XPackLicenseState(() -> 0L)); + public static final QueryBuilderResolver MOCK_QUERY_BUILDER_RESOLVER = new MockQueryBuilderResolver(); + private EsqlTestUtils() {} public static Configuration configuration(QueryPragmas pragmas, String query) { diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/MockQueryBuilderResolver.java b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/MockQueryBuilderResolver.java new file mode 100644 index 
0000000000000..7af3a89108fc0 --- /dev/null +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/MockQueryBuilderResolver.java @@ -0,0 +1,30 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.esql.session.QueryBuilderResolver; +import org.elasticsearch.xpack.esql.session.Result; + +import java.util.function.BiConsumer; + +public class MockQueryBuilderResolver extends QueryBuilderResolver { + public MockQueryBuilderResolver() { + super(null, null, null, null); + } + + @Override + public void resolveQueryBuilders( + LogicalPlan plan, + ActionListener listener, + BiConsumer> callback + ) { + callback.accept(plan, listener); + } +} diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/books-settings.json b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/books-settings.json new file mode 100644 index 0000000000000..b324c27b40653 --- /dev/null +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/books-settings.json @@ -0,0 +1,5 @@ +{ + "index": { + "number_of_shards": 3 + } +} diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/books.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/books.csv index 1deefaa3c6475..1cb01687e6511 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/books.csv +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/books.csv @@ -1,80 +1,80 @@ -book_no:keyword,title:text,author:text,year:integer,publisher:text,ratings:float,description:text -2924,A Gentle Creature and Other Stories: White Nights\, A Gentle Creature\, and The Dream of a Ridiculous Man (The World's Classics),[Fyodor Dostoevsky, Alan Myers, W. J. Leatherbarrow],2009,Oxford Paperbacks,4.00,In these stories Dostoevsky explores both the figure of the dreamer divorced from reality and also his own ambiguous attitude to utopianism\, themes central to many of his great novels. This new translation captures the power and lyricism of Dostoevsky's writing\, while the introduction examines the stories in relation to one another and to his novels. -7670,A Middle English Reader and Vocabulary,[Kenneth Sisam, J. R. R. Tolkien],2011,Courier Corporation,4.33,This highly respected anthology of medieval English literature features poetry\, prose and popular tales from Arthurian legend and classical mythology. Includes notes on each extract\, appendices\, and an extensive glossary by J. R. R. Tolkien. -7381,A Psychic in the Heartland: The Extraordinary Experiences of a Small Town Doctor,Bettilu Stein Faulkner,2003,Red Wheel/Weiser,4.50,The true story of a small-town doctor destined to live his life along two paths: one as a successful physician\, the other as a psychic with ever more interesting adventures. Experiencing a wide range of spiritual phenomena\, Dr. Riblet Hout learned about the connection between the healer and the healed\, our individual missions on earth\, free will\, and our relationship with God. He also paints a vivid picture of life on the other side as well as the moment of transition from physical life to afterlife. 
-2883,A Summer of Faulkner: As I Lay Dying/The Sound and the Fury/Light in August (Oprah's Book Club),William Faulkner,2005,Vintage Books,3.89,Presents three novels\, including As I Lay Dying\, in which the Bundren family journeys across Mississippi to bury their mother\, The Sound and the Fury\, in which Caddy Compson's story is narrated by her three brothers\, and Light in August\, in which th -4023,A Tolkien Compass: Including J. R. R. Tolkien's Guide to the Names in The Lord of the Rings,[Walter Scheps, Agnes Perkins, Charles Adolph Huttar, John Ronald Reuel Tolkien],1975,Open Court Publishing,4.67,The structure\, content\, and character of Tolkien's The Hobbit and The Lord of the Rings are dealt with in ten critical essays. -2382,A Wizard of Earthsea (Earthsea Trilogy Ser.),Ursula K. Le Guin,1991,Atheneum Books for Young Readers,4.01,A boy grows to manhood while attempting to subdue the evil he unleashed on the world as an apprentice to the Master Wizard. -7541,A Writer's Diary (Volume 1: 1873-1876),Fyodor Dostoevsky,1997,Northwestern University Press,4.50,Winner of the AATSEEL Outstanding Translation Award This is the first paperback edition of the complete collection of writings that has been called Dostoevsky's boldest experiment with literary form\, it is a uniquely encyclopedic forum of fictional and nonfictional genres. The Diary's radical format was matched by the extreme range of its contents. In a single frame it incorporated an astonishing variety of material: short stories\, humorous sketches\, reports on sensational crimes\, historical predictions\, portraits of famous people\, autobiographical pieces\, and plans for stories\, some of which were never written while others appeared in the Diary itself. -7400,Anna Karenina: Television Tie-In Edition (Signet classics),[Leo Tolstoy, SBP Editors],2019,Samaira Book Publishers,4.45,The Russian novelist and moral philosopher Leo Tolstoy (1828-1910) ranks as one of the world s great writers\, and his 'War and Peace' has been called the greatest novel ever written. But during his long lifetime\, Tolstoy also wrote enough shorter works to fill many volumes. The message in all his stories is presented with such humour that the reader hardly realises that it is strongly didactic. These stories give a snapshot of Russia and its people in the late nineteenth century. -4917,Autumn of the Patriarch,Gabriel Garcia Marquez,2014,Penguin UK,4.33,Gabriel Garcia Marquez\, winner of the 1982 Nobel Prize for Literature and author of One Hundred Years of Solitude\, explores the loneliness of power in Autumn of the Patriarch. 'Over the weekend the vultures got into the presidential palace by pecking through the screens on the balcony windows and the flapping of their wings stirred up the stagnant time inside' As the citizens of an unnamed Caribbean nation creep through dusty corridors in search of their tyrannical leader\, they cannot comprehend that the frail and withered man lying dead on the floor can be the self-styled General of the Universe. Their arrogant\, manically violent leader\, known for serving up traitors to dinner guests and drowning young children at sea\, can surely not die the humiliating death of a mere mortal? Tracing the demands of a man whose egocentric excesses mask the loneliness of isolation and whose lies have become so ingrained that they are indistinguishable from truth\, Marquez has created a fantastical portrait of despotism that rings with an air of reality. 
'Delights with its quirky humanity and black humour and impresses by its total originality' Vogue 'Captures perfectly the moral squalor and political paralysis that enshrouds a society awaiting the death of a long-term dictator' Guardian 'Marquez writes in this lyrical\, magical language that no-one else can do' Salman Rushdie -9896,Barn burning (A tale blazer book),William Faulkner,1979,Perfection Learning,3.50,Reprinted from Collected Stories of William Faulkner\, by permission of Random House\, Inc. -9607,Beowolf: The monsters and the critics,John Ronald Reuel Tolkien,1997,HarperCollins UK,4.12,A collection of seven essays by J.R.R. Tolkien arising out of Tolkien's work in medieval literature -1985,Brothers Karamazov,Fyodor Dostoevsky,2015,First Avenue Editions,5.00,Four brothers reunite in their hometown in Russia. The murder of their father forces the brothers to question their beliefs about each other\, religion\, and morality. -2713,Collected Stories of William Faulkner,William Faulkner,1995,Vintage,4.53,A collection of short stories focuses on the people of rural Mississippi -2464,Conversations with Kurt Vonnegut (Literary Conversations),Kurt Vonnegut,1988,Univ. Press of Mississippi,4.40,Gathers interviews with Vonnegut from each period of his career and offers a brief profile of his life and accomplishments -8534,Crime and Punishment (Oxford World's Classics),Fyodor Dostoevsky,2017,Oxford University Press,4.38,'One death\, in exchange for thousands of lives - it's simple arithmetic!' A new translation of Dostoevsky's epic masterpiece\, Crime and Punishment (1866). The impoverished student Raskolnikov decides to free himself from debt by killing an old moneylender\, an act he sees as elevating himself above conventional morality. Like Napoleon he will assert his will and his crime will be justified by its elimination of 'vermin' for the sake of the greater good. But Raskolnikov is torn apart by fear\, guilt\, and a growing conscience under the influence of his love for Sonya. Meanwhile the police detective Porfiry is on his trial. It is a powerfully psychological novel\, in which the St Petersburg setting\, Dostoevsky's own circumstances\, and contemporary social problems all play their part. -8605,Dead Souls,Nikolai Gogol,1997,Vintage,4.28,Chichikov\, an amusing and often confused schemer\, buys deceased serfs' names from landholders' poll tax lists hoping to mortgage them for profit -6970,Domestic Goddesses,Edith Vonnegut,1998,Pomegranate,4.67,In this immensely charming and insightful book\, artist Edith Vonnegut takes issue with traditional art imagery in which women are shown as weak and helpless. Through twenty-seven of her own paintings interspersed with her text\, she poignantly -- and humorously -- illustrates her maxim that the lives of mothers and homemakers are filled with endless challenges and vital decisions that should be portrayed with the dignity they deserve. In Vonnegut's paintings\, one woman bravely blocks the sun from harming a child (Sun Block) while another vacuums the stairs with angelic figures singing her praises (Electrolux). In contrasting her own Domestic Goddesses with the diaphanous women of classical art (seven paintings by masters such as Titian and Botticelli are included)\, she 'expresses the importance of traditional roles of women so cleverly and with such joy that her message and images will be forever emblazoned on our collective psyche. 
-4814,El Coronel No Tiene Quien Le Escriba / No One Writes to the Colonel (Spanish Edition),Gabriel Garcia Marquez,2005,Harper Collins,4.45,Written with compassionate realism and wit\, the stories in this mesmerizing collection depict the disparities of town and village life in South America\, of the frightfully poor and outrageously rich\, of memories and illusions\, and of lost opportunities and present joys.
-4636,FINAL WITNESS,Simon Tolkien,2004,Random House Digital\, Inc.,3.94,The murder of Lady Anne Robinson by two intruders causes a schism in the victim's family when her son convinces police that his father's beautiful personal assistant hired the killers\, while his father\, the British minister of defense\, refuses to believe his son and marries the accused. A first novel. Reprint.
-2936,Fellowship of the Ring 2ND Edition,John Ronald Reuel Tolkien,2008,HarperCollins UK,4.43,Sauron\, the Dark Lord\, has gathered to him all the Rings of Power - the means by which he intends to rule Middle-earth. All he lacks in his plans for dominion is the One Ring - the ring that rules them all - which has fallen into the hands of the hobbit\, Bilbo Baggins. In a sleepy village in the Shire\, young Frodo Baggins finds himself faced with an immense task\, as his elderly cousin Bilbo entrusts the Ring to his care. Frodo must leave his home and make a perilous journey across Middle-earth to the Cracks of Doom\, there to destroy the Ring and foil the Dark Lord in his evil purpose. JRR Tolkien's great work of imaginative fiction has been labelled both a heroic romance and a classic fantasy fiction. By turns comic and homely\, epic and diabolic\, the narrative moves through countless changes of scene and character in an imaginary world which is totally convincing in its detail.
-8956,GOD BLESS YOU MR. ROSEWATER : Or Pearls Before Swine,Kurt Vonnegut,1970,New York : Dell,4.00,A lawyer schemes to gain control of a large fortune by having the present claimant declared insane.
-6818,Hadji Murad,Leo Tolstoy,2022,Hachette UK,3.88,'How truth thickens and deepens when it migrates from didactic fable to the raw experience of a visceral awakening is one of the thrills of Tolstoy's stories' Sharon Cameron in her preface to Hadji Murad and Other Stories This\, the third volume of Tolstoy's shorter fiction concentrates on his later stories\, including one of his greatest\, 'Hadji Murad'. In the stark form of homily that shapes these later works\, life considered as one's own has no rational meaning. From the chain of events that follows in the wake of two schoolboys' deception in 'The Forged Coupon' to the disillusionment of the narrator in 'After the Ball' we see\, in Virginia Woolf's observation\, that Tolstoy puts at the centre of his writing one 'who gathers into himself all experience\, turns the world round between his fingers\, and never ceases to ask\, even as he enjoys it\, what is the meaning of it'. The riverrun edition reissues the translation of Louise and Aylmer Maude\, whose influential versions of Tolstoy first brought his work to a wide readership in English.
-3950,Hocus,Kurt Vonnegut,1997,Penguin,4.67,Tarkington College\, a small\, exclusive college in upstate New York\, is turned upside down when ten thousand prisoners from the maximum security prison across Lake Mohiga break out and head for the college
-5404,Intruder in the dust,William Faulkner,2011,Vintage,3.18,A classic Faulkner novel which explores the lives of a family of characters in the South. An aging black who has long refused to adopt the black's traditionally servile attitude is wrongfully accused of murdering a white man.
-5578,Intruder in the dust: A novel,William Faulkner,1991,Vintage,3.18,Dramatizes the events that surround the murder of a white man in a volatile Southern community
-6380,La hojarasca (Spanish Edition),Gabriel Garcia Marquez,1979,Harper Collins,3.75,Translated from the Spanish by Gregory Rabassa
-5335,Letters of J R R Tolkien,J.R.R. Tolkien,2014,HarperCollins,4.70,This collection will entertain all who appreciate the art of masterful letter writing. The Letters of J.R.R Tolkien sheds much light on Tolkien's creative genius and grand design for the creation of a whole new world: Middle-earth. Featuring a radically expanded index\, this volume provides a valuable research tool for all fans wishing to trace the evolution of THE HOBBIT and THE LORD OF THE RINGS.
-3870,My First 100 Words in Spanish/English (My First 100 Words Pull-Tab Book),Keith Faulkner,1998,Libros Para Ninos,4.50,Learning a foreign language has never been this much fun! Just pull the sturdy tabs and change the words under the pictures from English to Spanish and back again to English!
-4502,O'Brian's Bride,Colleen Faulkner,1995,Zebra Books,5.00,Abandoning her pampered English life to marry a man in the American colonies\, Elizabeth finds her new world shattered when her husband is killed in an accident\, leaving her in charge of a business on the untamed frontier. Original.
-7635,Oliphaunt (Beastly Verse),J. R. R. Tolkien,1989,Contemporary Books,2.50,A poem in which an elephant describes himself and his way of life. On board pages.
-3254,Pearl and Sir Orfeo,[John Ronald Reuel Tolkien, Christopher Tolkien],1995,Harpercollins Pub Limited,5.00,Three epic poems from 14th century England speak of life during the age of chivalry. Translated from medieval English.
-3677,Planet of Exile,Ursula K. Le Guin,1979,Orion,4.20,PLAYAWAY: An alliance between the powerful Tevars and the brown-skinned\, clairvoyant Farbons must take place if the two colonies are to withstand the fierce attack of the nomadic tribes from the north of the planet Eltanin.
-4289,Poems from the Hobbit,J R R Tolkien,1999,HarperCollins Publishers,4.00,A collection of J.R.R. Tolkien's Hobbit poems in a miniature hardback volume complete with illustrations by Tolkien himself. Far over misty mountains cold To dungeons deep and caverns old We must away ere break of day To seek the pale enchanted gold. J.R.R. Tolkien's acclaimed The Hobbit contains 12 poems which are themselves masterpieces of writing. This miniature book\, illustrated with 30 of Tolkien's own paintings and drawings from the book -- some quite rare and all in full colour -- includes all the poems\, plus Gollum's eight riddles in verse\, and will be a perfect keepsake for lovers of The Hobbit and of accomplished poetry.
-6151,Pop! Went Another Balloon: A Magical Counting Storybook (Magical Counting Storybooks),[Keith Faulkner, Rory Tyger],2003,Dutton Childrens Books,5.00,Toby the turtle goes from in-line skates to a motorcycle to a rocketship with a handful of balloons that pop\, one by one\, along the way.
-3535,Rainbow's End: A Magical Story and Moneybox,[Keith Faulkner, Beverlie Manson],2003,Barrons Juveniles,4.00,In this combination picture storybook and coin bank\, the unusual front cover shows an illustration from the story that's embellished with five transparent plastic windows. Opening the book\, children will find a story about a poor little ballerina who is crying because her dancing shoes are worn and she has no money to replace them. Full color. Consumable.
-8423,Raising Faithful Kids in a Fast-Paced World,Paul Faulkner,1995,Howard Publishing Company,5.00,To find help for struggling parents\, Dr. Paul Faulkner--renowned family counselor and popular speaker--interviewed 30 successful families who have managed to raise faithful kids while also maintaining demanding careers. The invaluable strategies and methods he gleaned are now available in this powerful book delivered in Dr. Faulkner's warm\, humorous style.
-1463,Realms of Tolkien: Images of Middle-earth,J. R. R. Tolkien,1997,HarperCollins Publishers,4.00,Twenty new and familiar Tolkien artists are represented in this fabulous volume\, breathing an extraordinary variety of life into 58 different scenes\, each of which is accompanied by appropriate passage from The Hobbit and The Lord of the Rings and The Silmarillion
-6323,Resurrection (The Penguin classics),Leo Tolstoy,2009,Penguin,3.25,Leo Tolstoy's last completed novel\, Resurrection is an intimate\, psychological tale of guilt\, anger and forgiveness Serving on the jury at a murder trial\, Prince Dmitri Nekhlyudov is devastated when he sees the prisoner - Katyusha\, a young maid he seduced and abandoned years before. As Dmitri faces the consequences of his actions\, he decides to give up his life of wealth and luxury to devote himself to rescuing Katyusha\, even if it means following her into exile in Siberia. But can a man truly find redemption by saving another person? Tolstoy's most controversial novel\, Resurrection (1899) is a scathing indictment of injustice\, corruption and hypocrisy at all levels of society. Creating a vast panorama of Russian life\, from peasants to aristocrats\, bureaucrats to convicts\, it reveals Tolstoy's magnificent storytelling powers. Anthony Briggs' superb new translation preserves Tolstoy's gripping realism and satirical humour. In his introduction\, Briggs discusses the true story behind Resurrection\, Tolstoy's political and religious reasons for writing the novel\, his gift for characterization and the compelling psychological portrait of Dmitri. This edition also includes a chronology\, notes and a summary of chapters. For more than seventy years\, Penguin has been the leading publisher of classic literature in the English-speaking world. With more than 1\,700 titles\, Penguin Classics represents a global bookshelf of the best works throughout history and across genres and disciplines. Readers trust the series to provide authoritative texts enhanced by introductions and notes by distinguished scholars and contemporary authors\, as well as up-to-date translations by award-winning translators.
-2714,Return of the King Being the Third Part of The Lord of the Rings,J. R. R. Tolkien,2012,HarperCollins,4.60,Concluding the story begun in The Hobbit\, this is the final part of Tolkien s epic masterpiece\, The Lord of the Rings\, featuring an exclusive cover image from the film\, the definitive text\, and a detailed map of Middle-earth. The armies of the Dark Lord Sauron are massing as his evil shadow spreads ever wider. Men\, Dwarves\, Elves and Ents unite forces to do battle agains the Dark. Meanwhile\, Frodo and Sam struggle further into Mordor in their heroic quest to destroy the One Ring. The devastating conclusion of J.R.R. Tolkien s classic tale of magic and adventure\, begun in The Fellowship of the Ring and The Two Towers\, features the definitive edition of the text and includes the Appendices and a revised Index in full. To celebrate the release of the first of Peter Jackson s two-part film adaptation of The Hobbit\, THE HOBBIT: AN UNEXPECTED JOURNEY\, this third part of The Lord of the Rings is available for a limited time with an exclusive cover image from Peter Jackson s award-winning trilogy.
-7350,Return of the Shadow,[John Ronald Reuel Tolkien, Christopher Tolkien],2000,Mariner Books,5.00,In this sixth volume of The History of Middle-earth the story reaches The Lord of the Rings. In The Return of the Shadow (an abandoned title for the first volume) Christopher Tolkien describes\, with full citation of the earliest notes\, outline plans\, and narrative drafts\, the intricate evolution of The Fellowship of the Ring and the gradual emergence of the conceptions that transformed what J.R.R. Tolkien for long believed would be a far shorter book\, 'a sequel to The Hobbit'. The enlargement of Bilbo's 'magic ring' into the supremely potent and dangerous Ruling Ring of the Dark Lord is traced and the precise moment is seen when\, in an astonishing and unforeseen leap in the earliest narrative\, a Black Rider first rode into the Shire\, his significance still unknown. The character of the hobbit called Trotter (afterwards Strider or Aragorn) is developed while his indentity remains an absolute puzzle\, and the suspicion only very slowly becomes certainty that he must after all be a Man. The hobbits\, Frodo's companions\, undergo intricate permutations of name and personality\, and other major figures appear in strange modes: a sinister Treebeard\, in league with the Enemy\, a ferocious and malevolent Farmer Maggot. The story in this book ends at the point where J.R.R. Tolkien halted in the story for a long time\, as the Company of the Ring\, still lacking Legolas and Gimli\, stood before the tomb of Balin in the Mines of Moria. The Return of the Shadow is illustrated with reproductions of the first maps and notable pages from the earliest manuscripts.
-6760,Roverandom,J. R. R. Tolkien,1999,Mariner Books,4.38,Rover\, a dog who has been turned into a toy dog encounters rival wizards and experiences various adventures on the moon with giant spiders\, dragon moths\, and the Great White Dragon. By the author of The Hobbit. Reprint.
-8873,Searoad: Chronicles of Klatsand,Ursula K. Le Guin,2004,Shambhala Publications,5.00,A series of interlinking tales and a novella by the author of the Earthsea trilogy portrays the triumphs and struggles of several generations of women who independently control Klatsand\, a small resort town on the Oregon coast. Reprint.
-2378,Selected Letters of Lucretia Coffin Mott (Women in American History),[Lucretia Mott, Holly Byers Ochoa, Carol Faulkner],2002,University of Illinois Press,5.00,Dedicated to reform of almost every kind - temperance\, peace\, equal rights\, woman suffrage\, nonresistance\, and the abolition of slavery - Mott viewed women's rights as only one element of a broad-based reform agenda for American society.
-1502,Selected Passages from Correspondence with Friends,Nikolai Vasilevich Gogol,2009,Vanderbilt University Press,4.00,Nikolai Gogol wrote some letters to his friends\, none of which were a nose of high rank. Many are reproduced here (the letters\, not noses).
-5996,Smith of Wooten Manor & Farmer Giles of Ham,John Ronald Reuel Tolkien,1969,Del Rey,4.91,Two bewitching fantasies by J.R.R. Tolkien\, beloved author of THE HOBBIT. In SMITH OF WOOTTON MAJOR\, Tolkien explores the gift of fantasy\, and what it means to the life and character of the man who receives it. And FARMER GILES OF HAM tells a delightfully ribald mock-heroic tale\, where a dragon who invades a town refuses to fight\, and a farmer is chosen to slay him.
-2301,Smith of Wootton Major & Farmer Giles of Ham,John Ronald Reuel Tolkien,1969,Del Rey,5.00,Two bewitching fantasies by J.R.R. Tolkien\, beloved author of THE HOBBIT. In SMITH OF WOOTTON MAJOR\, Tolkien explores the gift of fantasy\, and what it means to the life and character of the man who receives it. And FARMER GILES OF HAM tells a delightfully ribald mock-heroic tale\, where a dragon who invades a town refuses to fight\, and a farmer is chosen to slay him.
-2236,Steering the Craft,Ursula K. Le Guin,2015,Houghton Mifflin Harcourt,4.73,A revised and updated guide to the essentials of a writer's craft\, presented by a brilliant practitioner of the art Completely revised and rewritten to address the challenges and opportunities of the modern era\, this handbook is a short\, deceptively simple guide to the craft of writing. Le Guin lays out ten chapters that address the most fundamental components of narrative\, from the sound of language to sentence construction to point of view. Each chapter combines illustrative examples from the global canon with Le Guin's own witty commentary and an exercise that the writer can do solo or in a group. She also offers a comprehensive guide to working in writing groups\, both actual and online. Masterly and concise\, Steering the Craft deserves a place on every writer's shelf.
-4724,THE UNVANQUISHED,William Faulkner,2011,Vintage,3.50,Set in Mississippi during the Civil War and Reconstruction\, THE UNVANQUISHED focuses on the Sartoris family\, who\, with their code of personal responsibility and courage\, stand for the best of the Old South's traditions.
-5948,That We Are Gentle Creatures,Fyodor Dostoevsky,2009,OUP Oxford,4.33,In the stories in this volume Dostoevsky explores both the figure of the dreamer divorced from reality and also his own ambiguous attitude to utopianism\, themes central to many of his great novels. In White Nights the apparent idyll of the dreamer's romantic fantasies disguises profound loneliness and estrangement from 'living life'. Despite his sentimental friendship with Nastenka\, his final withdrawal into the world of the imagination anticipates the retreat into the 'underground' of many of Dostoevsky's later intellectual heroes. A Gentle Creature and The Dream of a Ridiculous Man show how such withdrawal from reality can end in spiritual desolation and moral indifference and how\, in Dostoevsky's view\, the tragedy of the alienated individual can be resolved only by the rediscovery of a sense of compassion and responsibility towards fellow human beings. This new translation captures the power and lyricism of Dostoevsky's writing\, while the introduction examines the stories in relation to one another and to his novels. ABOUT THE SERIES: For over 100 years Oxford World's Classics has made available the widest range of literature from around the globe. Each affordable volume reflects Oxford's commitment to scholarship\, providing the most accurate text plus a wealth of other valuable features\, including expert introductions by leading authorities\, helpful notes to clarify the text\, up-to-date bibliographies for further study\, and much more.
-1937,The Best Short Stories of Dostoevsky (Modern Library),Fyodor Dostoevsky,2012,Modern Library,4.33,This collection\, unique to the Modern Library\, gathers seven of Dostoevsky's key works and shows him to be equally adept at the short story as with the novel. Exploring many of the same themes as in his longer works\, these small masterpieces move from the tender and romantic White Nights\, an archetypal nineteenth-century morality tale of pathos and loss\, to the famous Notes from the Underground\, a story of guilt\, ineffectiveness\, and uncompromising cynicism\, and the first major work of existential literature. Among Dostoevsky's prototypical characters is Yemelyan in The Honest Thief\, whose tragedy turns on an inability to resist crime. Presented in chronological order\, in David Magarshack's celebrated translation\, this is the definitive edition of Dostoevsky's best stories.
-2776,The Devil and Other Stories (Oxford World's Classics),Leo Tolstoy,2003,OUP Oxford,5.00,'It is impossible to explain why Yevgeny chose Liza Annenskaya\, as it is always impossible to explain why a man chooses this and not that woman.' This collection of eleven stories spans virtually the whole of Tolstoy's creative life. While each is unique in form\, as a group they are representative of his style\, and touch on the central themes that surface in War and Peace and Anna Karenina. Stories as different as 'The Snowstorm'\, 'Lucerne'\, 'The Diary of a Madman'\, and 'The Devil' are grounded in autobiographical experience. They deal with journeys of self-discovery and the moral and religious questioning that characterizes Tolstoy's works of criticism and philosophy. 'Strider' and 'Father Sergy'\, as well as reflecting Tolstoy's own experiences\, also reveal profound psychological insights. These stories range over much of the Russian world of the nineteenth century\, from the nobility to the peasantry\, the military to the clergy\, from merchants and cobblers to a horse and a tree. Together they present a fascinating picture of Tolstoy's skill and artistry. ABOUT THE SERIES: For over 100 years Oxford World's Classics has made available the widest range of literature from around the globe. Each affordable volume reflects Oxford's commitment to scholarship\, providing the most accurate text plus a wealth of other valuable features\, including expert introductions by leading authorities\, helpful notes to clarify the text\, up-to-date bibliographies for further study\, and much more.
-4231,The Dispossessed,Ursula K. Le Guin,1974,Harpercollins,4.26,Frequently reissued with the same ISBN\, but with slightly differing bibliographical details.
-7480,The Hobbit,J. R. R. Tolkien,2012,Mariner Books,4.64,Celebrating 75 years of one of the world's most treasured classics with an all new trade paperback edition. Repackaged with new cover art. 500\,000 first printing.
-6405,The Hobbit or There and Back Again,J. R. R. Tolkien,2012,Mariner Books,4.63,Celebrating 75 years of one of the world's most treasured classics with an all new trade paperback edition. Repackaged with new cover art. 500\,000 first printing.
-2540,The Inspector General (Language - Russian) (Russian Edition),[Nicolai Gogol, Thomas Seltzer],2014,CreateSpace,3.50,The Inspector-General is a national institution. To place a purely literary valuation upon it and call it the greatest of Russian comedies would not convey the significance of its position either in Russian literature or in Russian life itself. There is no other single work in the modern literature of any language that carries with it the wealth of associations which the Inspector-General does to the educated Russian.
-2951,The Insulted and Injured,Fyodor Dostoevsky,2011,Wm. B. Eerdmans Publishing,4.00,The Insulted and Injured\, which came out in 1861\, was Fyodor Dostoevsky's first major work of fiction after his Siberian exile and the first of the long novels that made him famous. Set in nineteenth-century Petersburg\, this gripping novel features a vividly drawn set of characters - including Vanya (Dostoevsky's semi-autobiographical hero)\, Natasha (the woman he loves)\, and Alyosha (Natasha's aristocratic lover) - all suffering from the cruelly selfish machinations of Alyosha's father\, the dark and powerful Prince Valkovsky. Boris Jakim's fresh English-language rendering of this gem in the Doestoevsky canon is both more colorful and more accurate than any earlier translation. --from back cover.
-2130,The J. R. R. Tolkien Audio Collection,[John Ronald Reuel Tolkien, Christopher Tolkien],2002,HarperCollins Publishers,4.89,For generations\, J R R Tolkien's words have brought to thrilling life a world of hobbits\, magic\, and historic myth\, woken from its foggy slumber within our minds. Here\, he tells the tales in his own voice.
-9801,The Karamazov Brothers (Oxford World's Classics),Fyodor Dostoevsky,2008,Oxford University Press,4.40,A remarkable work showing the author's power to depict Russian character and his understanding of human nature. Driven by intense\, uncontrollable emotions of rage and revenge\, the four Karamazov brothers all become involved in the brutal murder of their despicable father.
-5469,The Lays of Beleriand,[John Ronald Reuel Tolkien, Christopher Tolkien],2002,Harpercollins Pub Limited,4.42,The third volume that contains the early myths and legends which led to the writing of Tolkien's epic tale of war\, The Silmarillion. This\, the third volume of The History of Middle-earth\, gives us a priviledged insight into the creation of the mythology of Middle-earth\, through the alliterative verse tales of two of the most crucial stories in Tolkien's world -- those of Turien and Luthien. The first of the poems is the unpublished Lay of The Children of Hurin\, narrating on a grand scale the tragedy of Turin Turambar. The second is the moving Lay of Leithian\, the chief source of the tale of Beren and Luthien in The Silmarillion\, telling of the Quest of the Silmaril and the encounter with Morgoth in his subterranean fortress. Accompanying the poems are commentaries on the evolution of the history of the Elder Days. Also included is the notable criticism of The Lay of The Leithian by CS Lewis\, who read the poem in 1929.
-2675,The Lord of the Rings - Boxed Set,J.R.R. Tolkien,2012,HarperCollins,4.56,This beautiful gift edition of The Hobbit\, J.R.R. Tolkien's classic prelude to his Lord of the Rings trilogy\, features cover art\, illustrations\, and watercolor paintings by the artist Alan Lee. Bilbo Baggins is a hobbit who enjoys a comfortable\, unambitious life\, rarely traveling any farther than his pantry or cellar. But his contentment is disturbed when the wizard Gandalf and a company of dwarves arrive on his doorstep one day to whisk him away on an adventure. They have launched a plot to raid the treasure hoard guarded by Smaug the Magnificent\, a large and very dangerous dragon. Bilbo reluctantly joins their quest\, unaware that on his journey to the Lonely Mountain he will encounter both a magic ring and a frightening creature known as Gollum. Written for J.R.R. Tolkien's own children\, The Hobbit has sold many millions of copies worldwide and established itself as a modern classic.
-7140,The Lord of the Rings Poster Collection: Six Paintings by Alan Lee (No. 1),[J. R. R. Tolkien, Alan Lee],2002,HarperSport,4.75,A selection of stunning poster paintings from the celebrated Tolkien artist Alan Lee - the man behind many of the striking images from The Lord of The Rings movie. The 50 paintings contained within the centenary edition of The Lord of the Rings in 1992 have themselves become classics and Alan Lee's interpretations are hailed as the most faithful to Tolkien's own vision. This new poster collection\, a perfect complement to volume one\, reproduces six more of the most popular paintings from the book in a format suitable either for hanging as posters or mounting and framing.
-5127,The Overcoat, Nikolai Gogol,1992,Courier Corporation,3.75,Four short stories include a satirical tale of Russian bureaucrats and a portrayal of an elderly couple living in the secluded countryside.
-8875,The Two Towers,John Ronald Reuel Tolkien,2007,HarperCollins UK,4.64,The second volume in The Lord of the Rings\, This title is also available as a film.
-4977,The Unvanquished,William Faulkner,2011,Vintage,3.50,Set in Mississippi during the Civil War and Reconstruction\, THE UNVANQUISHED focuses on the Sartoris family\, who\, with their code of personal responsibility and courage\, stand for the best of the Old South's traditions.
-4382,The Wolves of Witchmaker,Carole Guinane,2001,iUniverse,5.00,Polly Lavender is mysteriously lured onto Witchmaker's grounds along with her best friends Tony Rico\, Gracie Reene\, and Zeus\, the wolf they rescued as a pup. The three must quickly learn to master the art of magic because they have been chosen to lead Witchmaker Prep against a threat that has grim consequences.
-7912,The Word For World is Forest,Ursula K. Le Guin,2015,Gollancz,4.22,When the inhabitants of a peaceful world are conquered by the bloodthirsty yumens\, their existence is irrevocably altered. Forced into servitude\, the Athsheans find themselves at the mercy of their brutal masters. Desperation causes the Athsheans\, led by Selver\, to retaliate against their captors\, abandoning their strictures against violence. But in defending their lives\, they have endangered the very foundations of their society. For every blow against the invaders is a blow to the humanity of the Athsheans. And once the killing starts\, there is no turning back.
-1211,The brothers Karamazov,Fyodor Dostoevsky,2003,Bantam Classics,1.00,In 1880 Dostoevsky completed The Brothers Karamazov\, the literary effort for which he had been preparing all his life. Compelling\, profound\, complex\, it is the story of a patricide and of the four sons who each had a motive for murder: Dmitry\, the sensualist\, Ivan\, the intellectual\, Alyosha\, the mystic\, and twisted\, cunning Smerdyakov\, the bastard child. Frequently lurid\, nightmarish\, always brilliant\, the novel plunges the reader into a sordid love triangle\, a pathological obsession\, and a gripping courtroom drama. But throughout the whole\, Dostoevsky searhes for the truth--about man\, about life\, about the existence of God. A terrifying answer to man's eternal questions\, this monumental work remains the crowning achievement of perhaps the finest novelist of all time. From the Paperback edition.
-8086,The grand inquisitor (Milestones of thought),Fyodor Dostoevsky,1981,A&C Black,4.09,Dostoevsky's portrayal of the Catholic Church during the Inquisition is a plea for the power of pure faith\, and a critique of the tyrannies of institutionalized religion. This is an except from the Brothers Karamazov which stands alone as a statement of philiosophy and a warning about the surrender of freedom for the sake of comfort.
-8077,The unvanquished,William Faulkner,2011,Vintage,4.00,Set in Mississippi during the Civil War and Reconstruction\, THE UNVANQUISHED focuses on the Sartoris family\, who\, with their code of personal responsibility and courage\, stand for the best of the Old South's traditions.
-8480,The wind's twelve quarters: Short stories,Ursula K. Le Guin,2017,HarperCollins,5.00,The recipient of numerous literary prizes\, including the National Book Award\, the Kafka Award\, and the Pushcart Prize\, Ursula K. Le Guin is renowned for her lyrical writing\, rich characters\, and diverse worlds. The Wind's Twelve Quarters collects seventeen powerful stories\, each with an introduction by the author\, ranging from fantasy to intriguing scientific concepts\, from medieval settings to the future. Including an insightful foreword by Le Guin\, describing her experience\, her inspirations\, and her approach to writing\, this stunning collection explores human values\, relationships\, and survival\, and showcases the myriad talents of one of the most provocative writers of our time.
-2847,To Love A Dark Stranger (Lovegram Historical Romance),Colleen Faulkner,1997,Zebra Books,5.00,Bestselling author Colleen Faulkner's tumultuous saga of royal intrigue and forbidden desire sweeps from the magnificent estates of the aristocracy to the shadowy streets of London to King Charles II's glittering Restoration court.
-3293,Universe by Design,Danny Faulkner,2004,New Leaf Publishing Group,4.25,Views the stars and planets from a creationist standpoint\, addresses common misconceptions and difficulties about relativity and cosmology\, and discusses problems with the big bang theory with many analogies\, examples\, diagrams\, and illustrations. Original.
-5327,War and Peace,Leo Tolstoy,2016,Lulu.com,3.84,Covering the period from the French invasion under Napoleon into Russia. Although not covering solely the war itself\, the serialized novel does cover the effects the war had on Russian society from the common person right up to the Tsar himself. The book starts to move more to a philosophical consideration on war and peace near the end making the book as a whole an important piece of literature.
-4536,War and Peace (Signet Classics),[Leo Tolstoy, Pat Conroy, John Hockenberry],2012,Signet Classics,4.75,Presents the classical epic of the Napoleonic Wars and their effects on four Russian families.
-9032,War and Peace: A Novel (6 Volumes),Tolstoy Leo,2013,Hardpress Publishing,3.81,Unlike some other reproductions of classic texts (1) We have not used OCR(Optical Character Recognition)\, as this leads to bad quality books with introduced typos. (2) In books where there are images such as portraits\, maps\, sketches etc We have endeavoured to keep the quality of these images\, so they represent accurately the original artefact. Although occasionally there may be certain imperfections with these old texts\, we feel they deserve to be made available for future generations to enjoy.
-5119,William Faulkner,William Faulkner,2011,Vintage,4.00,This invaluable volume\, which has been republished to commemorate the one-hundredth anniversary of Faulkner's birth\, contains some of the greatest short fiction by a writer who defined the course of American literature. Its forty-five stories fall into three categories: those not included in Faulkner's earlier collections\, previously unpublished short fiction\, and stories that were later expanded into such novels as The Unvanquished\, The Hamlet\, and Go Down\, Moses. With its Introduction and extensive notes by the biographer Joseph Blotner\, Uncollected Stories of William Faulkner is an essential addition to its author's canon--as well as a book of some of the most haunting\, harrowing\, and atmospheric short fiction written in the twentieth century.
-8615,Winter notes on summer impressions,Fyodor Dostoevsky,2018,Alma Books,4.75,In June 1862\, Dostoevsky left Petersburg on his first excursion to Western Europe. Ostensibly making the trip to consult Western specialists about his epilepsy\, he also wished to see first-hand the source of the Western ideas he believed were corrupting Russia. Over the course of his journey he visited a number of major cities\, including Berlin\, Paris\, London\, Florence\, Milan and Vienna.His record of the trip\, Winter Notes on Summer Impressions - first published in the February 1863 issue of Vremya\, the periodical he edited - is the chrysalis out of which many elements of his later masterpieces developed.
-6478,Woman-The Full Story: A Dynamic Celebration of Freedoms,Michele Guinness,2003,Zondervan,5.00,What does it mean to be a woman today? What have women inherited from their radical\, risk-taking sisters of the past? And how does God view this half of humanity? Michele Guinness invites us on an adventure of discovery\, exploring the biblical texts\, the annals of history and the experiences of women today in search of the challenges and achievements\, failures and joys\, of women throughout the ages.
-8678,Worlds of Exile and Illusion: Three Complete Novels of the Hainish Series in One Volume--Rocannon's World\, Planet of Exile\, City of Illusions,Ursula K. Le Guin,2016,Orb Books,4.41,Worlds of Exile and Illusion contains three novels in the Hainish Series from Ursula K. Le Guin\, one of the greatest science fiction writers and many times the winner of the Hugo and Nebula Awards. Her career as a novelist was launched by the three novels contained here. These books\, Rocannon's World\, Planet of Exile\, and City of Illusions\, are set in the same universe as Le Guin's groundbreaking classic\, The Left Hand of Darkness. At the Publisher's request\, this title is being sold without Digital Rights Management Software (DRM) applied.
+_id:keyword,book_no:keyword,title:text,author:text,year:integer,publisher:text,ratings:float,description:text
+0,2924,A Gentle Creature and Other Stories: White Nights\, A Gentle Creature\, and The Dream of a Ridiculous Man (The World's Classics),[Fyodor Dostoevsky, Alan Myers, W. J. Leatherbarrow],2009,Oxford Paperbacks,4.00,In these stories Dostoevsky explores both the figure of the dreamer divorced from reality and also his own ambiguous attitude to utopianism\, themes central to many of his great novels. This new translation captures the power and lyricism of Dostoevsky's writing\, while the introduction examines the stories in relation to one another and to his novels.
+1,7670,A Middle English Reader and Vocabulary,[Kenneth Sisam, J. R. R. Tolkien],2011,Courier Corporation,4.33,This highly respected anthology of medieval English literature features poetry\, prose and popular tales from Arthurian legend and classical mythology. Includes notes on each extract\, appendices\, and an extensive glossary by J. R. R. Tolkien.
+2,7381,A Psychic in the Heartland: The Extraordinary Experiences of a Small Town Doctor,Bettilu Stein Faulkner,2003,Red Wheel/Weiser,4.50,The true story of a small-town doctor destined to live his life along two paths: one as a successful physician\, the other as a psychic with ever more interesting adventures. Experiencing a wide range of spiritual phenomena\, Dr. Riblet Hout learned about the connection between the healer and the healed\, our individual missions on earth\, free will\, and our relationship with God. He also paints a vivid picture of life on the other side as well as the moment of transition from physical life to afterlife.
+3,2883,A Summer of Faulkner: As I Lay Dying/The Sound and the Fury/Light in August (Oprah's Book Club),William Faulkner,2005,Vintage Books,3.89,Presents three novels\, including As I Lay Dying\, in which the Bundren family journeys across Mississippi to bury their mother\, The Sound and the Fury\, in which Caddy Compson's story is narrated by her three brothers\, and Light in August\, in which th
+4,4023,A Tolkien Compass: Including J. R. R. Tolkien's Guide to the Names in The Lord of the Rings,[Walter Scheps, Agnes Perkins, Charles Adolph Huttar, John Ronald Reuel Tolkien],1975,Open Court Publishing,4.67,The structure\, content\, and character of Tolkien's The Hobbit and The Lord of the Rings are dealt with in ten critical essays.
+5,2382,A Wizard of Earthsea (Earthsea Trilogy Ser.),Ursula K. Le Guin,1991,Atheneum Books for Young Readers,4.01,A boy grows to manhood while attempting to subdue the evil he unleashed on the world as an apprentice to the Master Wizard.
+6,7541,A Writer's Diary (Volume 1: 1873-1876),Fyodor Dostoevsky,1997,Northwestern University Press,4.50,Winner of the AATSEEL Outstanding Translation Award This is the first paperback edition of the complete collection of writings that has been called Dostoevsky's boldest experiment with literary form\, it is a uniquely encyclopedic forum of fictional and nonfictional genres. The Diary's radical format was matched by the extreme range of its contents. In a single frame it incorporated an astonishing variety of material: short stories\, humorous sketches\, reports on sensational crimes\, historical predictions\, portraits of famous people\, autobiographical pieces\, and plans for stories\, some of which were never written while others appeared in the Diary itself.
+7,7400,Anna Karenina: Television Tie-In Edition (Signet classics),[Leo Tolstoy, SBP Editors],2019,Samaira Book Publishers,4.45,The Russian novelist and moral philosopher Leo Tolstoy (1828-1910) ranks as one of the world s great writers\, and his 'War and Peace' has been called the greatest novel ever written. But during his long lifetime\, Tolstoy also wrote enough shorter works to fill many volumes. The message in all his stories is presented with such humour that the reader hardly realises that it is strongly didactic. These stories give a snapshot of Russia and its people in the late nineteenth century.
+8,4917,Autumn of the Patriarch,Gabriel Garcia Marquez,2014,Penguin UK,4.33,Gabriel Garcia Marquez\, winner of the 1982 Nobel Prize for Literature and author of One Hundred Years of Solitude\, explores the loneliness of power in Autumn of the Patriarch. 'Over the weekend the vultures got into the presidential palace by pecking through the screens on the balcony windows and the flapping of their wings stirred up the stagnant time inside' As the citizens of an unnamed Caribbean nation creep through dusty corridors in search of their tyrannical leader\, they cannot comprehend that the frail and withered man lying dead on the floor can be the self-styled General of the Universe. Their arrogant\, manically violent leader\, known for serving up traitors to dinner guests and drowning young children at sea\, can surely not die the humiliating death of a mere mortal? Tracing the demands of a man whose egocentric excesses mask the loneliness of isolation and whose lies have become so ingrained that they are indistinguishable from truth\, Marquez has created a fantastical portrait of despotism that rings with an air of reality. 'Delights with its quirky humanity and black humour and impresses by its total originality' Vogue 'Captures perfectly the moral squalor and political paralysis that enshrouds a society awaiting the death of a long-term dictator' Guardian 'Marquez writes in this lyrical\, magical language that no-one else can do' Salman Rushdie
+9,9896,Barn burning (A tale blazer book),William Faulkner,1979,Perfection Learning,3.50,Reprinted from Collected Stories of William Faulkner\, by permission of Random House\, Inc.
+10,9607,Beowolf: The monsters and the critics,John Ronald Reuel Tolkien,1997,HarperCollins UK,4.12,A collection of seven essays by J.R.R. Tolkien arising out of Tolkien's work in medieval literature
+11,1985,Brothers Karamazov,Fyodor Dostoevsky,2015,First Avenue Editions,5.00,Four brothers reunite in their hometown in Russia. The murder of their father forces the brothers to question their beliefs about each other\, religion\, and morality.
+12,2713,Collected Stories of William Faulkner,William Faulkner,1995,Vintage,4.53,A collection of short stories focuses on the people of rural Mississippi
+13,2464,Conversations with Kurt Vonnegut (Literary Conversations),Kurt Vonnegut,1988,Univ. Press of Mississippi,4.40,Gathers interviews with Vonnegut from each period of his career and offers a brief profile of his life and accomplishments
+14,8534,Crime and Punishment (Oxford World's Classics),Fyodor Dostoevsky,2017,Oxford University Press,4.38,'One death\, in exchange for thousands of lives - it's simple arithmetic!' A new translation of Dostoevsky's epic masterpiece\, Crime and Punishment (1866). The impoverished student Raskolnikov decides to free himself from debt by killing an old moneylender\, an act he sees as elevating himself above conventional morality. Like Napoleon he will assert his will and his crime will be justified by its elimination of 'vermin' for the sake of the greater good. But Raskolnikov is torn apart by fear\, guilt\, and a growing conscience under the influence of his love for Sonya. Meanwhile the police detective Porfiry is on his trial. It is a powerfully psychological novel\, in which the St Petersburg setting\, Dostoevsky's own circumstances\, and contemporary social problems all play their part.
+15,8605,Dead Souls,Nikolai Gogol,1997,Vintage,4.28,Chichikov\, an amusing and often confused schemer\, buys deceased serfs' names from landholders' poll tax lists hoping to mortgage them for profit
+16,6970,Domestic Goddesses,Edith Vonnegut,1998,Pomegranate,4.67,In this immensely charming and insightful book\, artist Edith Vonnegut takes issue with traditional art imagery in which women are shown as weak and helpless. Through twenty-seven of her own paintings interspersed with her text\, she poignantly -- and humorously -- illustrates her maxim that the lives of mothers and homemakers are filled with endless challenges and vital decisions that should be portrayed with the dignity they deserve. In Vonnegut's paintings\, one woman bravely blocks the sun from harming a child (Sun Block) while another vacuums the stairs with angelic figures singing her praises (Electrolux). In contrasting her own Domestic Goddesses with the diaphanous women of classical art (seven paintings by masters such as Titian and Botticelli are included)\, she 'expresses the importance of traditional roles of women so cleverly and with such joy that her message and images will be forever emblazoned on our collective psyche.
+17,4814,El Coronel No Tiene Quien Le Escriba / No One Writes to the Colonel (Spanish Edition),Gabriel Garcia Marquez,2005,Harper Collins,4.45,Written with compassionate realism and wit\, the stories in this mesmerizing collection depict the disparities of town and village life in South America\, of the frightfully poor and outrageously rich\, of memories and illusions\, and of lost opportunities and present joys.
+18,4636,FINAL WITNESS,Simon Tolkien,2004,Random House Digital\, Inc.,3.94,The murder of Lady Anne Robinson by two intruders causes a schism in the victim's family when her son convinces police that his father's beautiful personal assistant hired the killers\, while his father\, the British minister of defense\, refuses to believe his son and marries the accused. A first novel. Reprint.
+19,2936,Fellowship of the Ring 2ND Edition,John Ronald Reuel Tolkien,2008,HarperCollins UK,4.43,Sauron\, the Dark Lord\, has gathered to him all the Rings of Power - the means by which he intends to rule Middle-earth. All he lacks in his plans for dominion is the One Ring - the ring that rules them all - which has fallen into the hands of the hobbit\, Bilbo Baggins. In a sleepy village in the Shire\, young Frodo Baggins finds himself faced with an immense task\, as his elderly cousin Bilbo entrusts the Ring to his care. Frodo must leave his home and make a perilous journey across Middle-earth to the Cracks of Doom\, there to destroy the Ring and foil the Dark Lord in his evil purpose. JRR Tolkien's great work of imaginative fiction has been labelled both a heroic romance and a classic fantasy fiction. By turns comic and homely\, epic and diabolic\, the narrative moves through countless changes of scene and character in an imaginary world which is totally convincing in its detail.
+20,8956,GOD BLESS YOU MR. ROSEWATER : Or Pearls Before Swine,Kurt Vonnegut,1970,New York : Dell,4.00,A lawyer schemes to gain control of a large fortune by having the present claimant declared insane.
+21,6818,Hadji Murad,Leo Tolstoy,2022,Hachette UK,3.88,'How truth thickens and deepens when it migrates from didactic fable to the raw experience of a visceral awakening is one of the thrills of Tolstoy's stories' Sharon Cameron in her preface to Hadji Murad and Other Stories This\, the third volume of Tolstoy's shorter fiction concentrates on his later stories\, including one of his greatest\, 'Hadji Murad'. In the stark form of homily that shapes these later works\, life considered as one's own has no rational meaning. From the chain of events that follows in the wake of two schoolboys' deception in 'The Forged Coupon' to the disillusionment of the narrator in 'After the Ball' we see\, in Virginia Woolf's observation\, that Tolstoy puts at the centre of his writing one 'who gathers into himself all experience\, turns the world round between his fingers\, and never ceases to ask\, even as he enjoys it\, what is the meaning of it'. The riverrun edition reissues the translation of Louise and Aylmer Maude\, whose influential versions of Tolstoy first brought his work to a wide readership in English.
+22,3950,Hocus,Kurt Vonnegut,1997,Penguin,4.67,Tarkington College\, a small\, exclusive college in upstate New York\, is turned upside down when ten thousand prisoners from the maximum security prison across Lake Mohiga break out and head for the college
+23,5404,Intruder in the dust,William Faulkner,2011,Vintage,3.18,A classic Faulkner novel which explores the lives of a family of characters in the South. An aging black who has long refused to adopt the black's traditionally servile attitude is wrongfully accused of murdering a white man.
+24,5578,Intruder in the dust: A novel,William Faulkner,1991,Vintage,3.18,Dramatizes the events that surround the murder of a white man in a volatile Southern community
+25,6380,La hojarasca (Spanish Edition),Gabriel Garcia Marquez,1979,Harper Collins,3.75,Translated from the Spanish by Gregory Rabassa
+26,5335,Letters of J R R Tolkien,J.R.R. Tolkien,2014,HarperCollins,4.70,This collection will entertain all who appreciate the art of masterful letter writing. The Letters of J.R.R Tolkien sheds much light on Tolkien's creative genius and grand design for the creation of a whole new world: Middle-earth. Featuring a radically expanded index\, this volume provides a valuable research tool for all fans wishing to trace the evolution of THE HOBBIT and THE LORD OF THE RINGS.
+27,3870,My First 100 Words in Spanish/English (My First 100 Words Pull-Tab Book),Keith Faulkner,1998,Libros Para Ninos,4.50,Learning a foreign language has never been this much fun! Just pull the sturdy tabs and change the words under the pictures from English to Spanish and back again to English!
+28,4502,O'Brian's Bride,Colleen Faulkner,1995,Zebra Books,5.00,Abandoning her pampered English life to marry a man in the American colonies\, Elizabeth finds her new world shattered when her husband is killed in an accident\, leaving her in charge of a business on the untamed frontier. Original.
+29,7635,Oliphaunt (Beastly Verse),J. R. R. Tolkien,1989,Contemporary Books,2.50,A poem in which an elephant describes himself and his way of life. On board pages.
+30,3254,Pearl and Sir Orfeo,[John Ronald Reuel Tolkien, Christopher Tolkien],1995,Harpercollins Pub Limited,5.00,Three epic poems from 14th century England speak of life during the age of chivalry. Translated from medieval English.
+31,3677,Planet of Exile,Ursula K. Le Guin,1979,Orion,4.20,PLAYAWAY: An alliance between the powerful Tevars and the brown-skinned\, clairvoyant Farbons must take place if the two colonies are to withstand the fierce attack of the nomadic tribes from the north of the planet Eltanin.
+32,4289,Poems from the Hobbit,J R R Tolkien,1999,HarperCollins Publishers,4.00,A collection of J.R.R. Tolkien's Hobbit poems in a miniature hardback volume complete with illustrations by Tolkien himself. Far over misty mountains cold To dungeons deep and caverns old We must away ere break of day To seek the pale enchanted gold. J.R.R. Tolkien's acclaimed The Hobbit contains 12 poems which are themselves masterpieces of writing. This miniature book\, illustrated with 30 of Tolkien's own paintings and drawings from the book -- some quite rare and all in full colour -- includes all the poems\, plus Gollum's eight riddles in verse\, and will be a perfect keepsake for lovers of The Hobbit and of accomplished poetry.
+33,6151,Pop! Went Another Balloon: A Magical Counting Storybook (Magical Counting Storybooks),[Keith Faulkner, Rory Tyger],2003,Dutton Childrens Books,5.00,Toby the turtle goes from in-line skates to a motorcycle to a rocketship with a handful of balloons that pop\, one by one\, along the way.
+34,3535,Rainbow's End: A Magical Story and Moneybox,[Keith Faulkner, Beverlie Manson],2003,Barrons Juveniles,4.00,In this combination picture storybook and coin bank\, the unusual front cover shows an illustration from the story that's embellished with five transparent plastic windows. Opening the book\, children will find a story about a poor little ballerina who is crying because her dancing shoes are worn and she has no money to replace them. Full color. Consumable.
+35,8423,Raising Faithful Kids in a Fast-Paced World,Paul Faulkner,1995,Howard Publishing Company,5.00,To find help for struggling parents\, Dr. Paul Faulkner--renowned family counselor and popular speaker--interviewed 30 successful families who have managed to raise faithful kids while also maintaining demanding careers. The invaluable strategies and methods he gleaned are now available in this powerful book delivered in Dr. Faulkner's warm\, humorous style.
+36,1463,Realms of Tolkien: Images of Middle-earth,J. R. R. Tolkien,1997,HarperCollins Publishers,4.00,Twenty new and familiar Tolkien artists are represented in this fabulous volume\, breathing an extraordinary variety of life into 58 different scenes\, each of which is accompanied by appropriate passage from The Hobbit and The Lord of the Rings and The Silmarillion
+37,6323,Resurrection (The Penguin classics),Leo Tolstoy,2009,Penguin,3.25,Leo Tolstoy's last completed novel\, Resurrection is an intimate\, psychological tale of guilt\, anger and forgiveness Serving on the jury at a murder trial\, Prince Dmitri Nekhlyudov is devastated when he sees the prisoner - Katyusha\, a young maid he seduced and abandoned years before. As Dmitri faces the consequences of his actions\, he decides to give up his life of wealth and luxury to devote himself to rescuing Katyusha\, even if it means following her into exile in Siberia. But can a man truly find redemption by saving another person? Tolstoy's most controversial novel\, Resurrection (1899) is a scathing indictment of injustice\, corruption and hypocrisy at all levels of society. Creating a vast panorama of Russian life\, from peasants to aristocrats\, bureaucrats to convicts\, it reveals Tolstoy's magnificent storytelling powers. Anthony Briggs' superb new translation preserves Tolstoy's gripping realism and satirical humour. In his introduction\, Briggs discusses the true story behind Resurrection\, Tolstoy's political and religious reasons for writing the novel\, his gift for characterization and the compelling psychological portrait of Dmitri. This edition also includes a chronology\, notes and a summary of chapters. For more than seventy years\, Penguin has been the leading publisher of classic literature in the English-speaking world. With more than 1\,700 titles\, Penguin Classics represents a global bookshelf of the best works throughout history and across genres and disciplines. Readers trust the series to provide authoritative texts enhanced by introductions and notes by distinguished scholars and contemporary authors\, as well as up-to-date translations by award-winning translators.
+38,2714,Return of the King Being the Third Part of The Lord of the Rings,J. R. R. Tolkien,2012,HarperCollins,4.60,Concluding the story begun in The Hobbit\, this is the final part of Tolkien s epic masterpiece\, The Lord of the Rings\, featuring an exclusive cover image from the film\, the definitive text\, and a detailed map of Middle-earth. The armies of the Dark Lord Sauron are massing as his evil shadow spreads ever wider. Men\, Dwarves\, Elves and Ents unite forces to do battle agains the Dark. Meanwhile\, Frodo and Sam struggle further into Mordor in their heroic quest to destroy the One Ring. The devastating conclusion of J.R.R. Tolkien s classic tale of magic and adventure\, begun in The Fellowship of the Ring and The Two Towers\, features the definitive edition of the text and includes the Appendices and a revised Index in full. To celebrate the release of the first of Peter Jackson s two-part film adaptation of The Hobbit\, THE HOBBIT: AN UNEXPECTED JOURNEY\, this third part of The Lord of the Rings is available for a limited time with an exclusive cover image from Peter Jackson s award-winning trilogy.
+39,7350,Return of the Shadow,[John Ronald Reuel Tolkien, Christopher Tolkien],2000,Mariner Books,5.00,In this sixth volume of The History of Middle-earth the story reaches The Lord of the Rings. In The Return of the Shadow (an abandoned title for the first volume) Christopher Tolkien describes\, with full citation of the earliest notes\, outline plans\, and narrative drafts\, the intricate evolution of The Fellowship of the Ring and the gradual emergence of the conceptions that transformed what J.R.R. Tolkien for long believed would be a far shorter book\, 'a sequel to The Hobbit'. The enlargement of Bilbo's 'magic ring' into the supremely potent and dangerous Ruling Ring of the Dark Lord is traced and the precise moment is seen when\, in an astonishing and unforeseen leap in the earliest narrative\, a Black Rider first rode into the Shire\, his significance still unknown. The character of the hobbit called Trotter (afterwards Strider or Aragorn) is developed while his indentity remains an absolute puzzle\, and the suspicion only very slowly becomes certainty that he must after all be a Man. The hobbits\, Frodo's companions\, undergo intricate permutations of name and personality\, and other major figures appear in strange modes: a sinister Treebeard\, in league with the Enemy\, a ferocious and malevolent Farmer Maggot. The story in this book ends at the point where J.R.R. Tolkien halted in the story for a long time\, as the Company of the Ring\, still lacking Legolas and Gimli\, stood before the tomb of Balin in the Mines of Moria. The Return of the Shadow is illustrated with reproductions of the first maps and notable pages from the earliest manuscripts.
+40,6760,Roverandom,J. R. R. Tolkien,1999,Mariner Books,4.38,Rover\, a dog who has been turned into a toy dog encounters rival wizards and experiences various adventures on the moon with giant spiders\, dragon moths\, and the Great White Dragon. By the author of The Hobbit. Reprint.
+41,8873,Searoad: Chronicles of Klatsand,Ursula K. Le Guin,2004,Shambhala Publications,5.00,A series of interlinking tales and a novella by the author of the Earthsea trilogy portrays the triumphs and struggles of several generations of women who independently control Klatsand\, a small resort town on the Oregon coast. Reprint.
+42,2378,Selected Letters of Lucretia Coffin Mott (Women in American History),[Lucretia Mott, Holly Byers Ochoa, Carol Faulkner],2002,University of Illinois Press,5.00,Dedicated to reform of almost every kind - temperance\, peace\, equal rights\, woman suffrage\, nonresistance\, and the abolition of slavery - Mott viewed women's rights as only one element of a broad-based reform agenda for American society.
+43,1502,Selected Passages from Correspondence with Friends,Nikolai Vasilevich Gogol,2009,Vanderbilt University Press,4.00,Nikolai Gogol wrote some letters to his friends\, none of which were a nose of high rank. Many are reproduced here (the letters\, not noses).
+44,5996,Smith of Wooten Manor & Farmer Giles of Ham,John Ronald Reuel Tolkien,1969,Del Rey,4.91,Two bewitching fantasies by J.R.R. Tolkien\, beloved author of THE HOBBIT. In SMITH OF WOOTTON MAJOR\, Tolkien explores the gift of fantasy\, and what it means to the life and character of the man who receives it. And FARMER GILES OF HAM tells a delightfully ribald mock-heroic tale\, where a dragon who invades a town refuses to fight\, and a farmer is chosen to slay him.
+45,2301,Smith of Wootton Major & Farmer Giles of Ham,John Ronald Reuel Tolkien,1969,Del Rey,5.00,Two bewitching fantasies by J.R.R. Tolkien\, beloved author of THE HOBBIT. In SMITH OF WOOTTON MAJOR\, Tolkien explores the gift of fantasy\, and what it means to the life and character of the man who receives it. And FARMER GILES OF HAM tells a delightfully ribald mock-heroic tale\, where a dragon who invades a town refuses to fight\, and a farmer is chosen to slay him.
+46,2236,Steering the Craft,Ursula K. Le Guin,2015,Houghton Mifflin Harcourt,4.73,A revised and updated guide to the essentials of a writer's craft\, presented by a brilliant practitioner of the art Completely revised and rewritten to address the challenges and opportunities of the modern era\, this handbook is a short\, deceptively simple guide to the craft of writing. Le Guin lays out ten chapters that address the most fundamental components of narrative\, from the sound of language to sentence construction to point of view. Each chapter combines illustrative examples from the global canon with Le Guin's own witty commentary and an exercise that the writer can do solo or in a group. She also offers a comprehensive guide to working in writing groups\, both actual and online. Masterly and concise\, Steering the Craft deserves a place on every writer's shelf.
+47,4724,THE UNVANQUISHED,William Faulkner,2011,Vintage,3.50,Set in Mississippi during the Civil War and Reconstruction\, THE UNVANQUISHED focuses on the Sartoris family\, who\, with their code of personal responsibility and courage\, stand for the best of the Old South's traditions. +48,5948,That We Are Gentle Creatures,Fyodor Dostoevsky,2009,OUP Oxford,4.33,In the stories in this volume Dostoevsky explores both the figure of the dreamer divorced from reality and also his own ambiguous attitude to utopianism\, themes central to many of his great novels. In White Nights the apparent idyll of the dreamer's romantic fantasies disguises profound loneliness and estrangement from 'living life'. Despite his sentimental friendship with Nastenka\, his final withdrawal into the world of the imagination anticipates the retreat into the 'underground' of many of Dostoevsky's later intellectual heroes. A Gentle Creature and The Dream of a Ridiculous Man show how such withdrawal from reality can end in spiritual desolation and moral indifference and how\, in Dostoevsky's view\, the tragedy of the alienated individual can be resolved only by the rediscovery of a sense of compassion and responsibility towards fellow human beings. This new translation captures the power and lyricism of Dostoevsky's writing\, while the introduction examines the stories in relation to one another and to his novels. ABOUT THE SERIES: For over 100 years Oxford World's Classics has made available the widest range of literature from around the globe. Each affordable volume reflects Oxford's commitment to scholarship\, providing the most accurate text plus a wealth of other valuable features\, including expert introductions by leading authorities\, helpful notes to clarify the text\, up-to-date bibliographies for further study\, and much more. +49,1937,The Best Short Stories of Dostoevsky (Modern Library),Fyodor Dostoevsky,2012,Modern Library,4.33,This collection\, unique to the Modern Library\, gathers seven of Dostoevsky's key works and shows him to be equally adept at the short story as with the novel. Exploring many of the same themes as in his longer works\, these small masterpieces move from the tender and romantic White Nights\, an archetypal nineteenth-century morality tale of pathos and loss\, to the famous Notes from the Underground\, a story of guilt\, ineffectiveness\, and uncompromising cynicism\, and the first major work of existential literature. Among Dostoevsky's prototypical characters is Yemelyan in The Honest Thief\, whose tragedy turns on an inability to resist crime. Presented in chronological order\, in David Magarshack's celebrated translation\, this is the definitive edition of Dostoevsky's best stories. +50,2776,The Devil and Other Stories (Oxford World's Classics),Leo Tolstoy,2003,OUP Oxford,5.00,'It is impossible to explain why Yevgeny chose Liza Annenskaya\, as it is always impossible to explain why a man chooses this and not that woman.' This collection of eleven stories spans virtually the whole of Tolstoy's creative life. While each is unique in form\, as a group they are representative of his style\, and touch on the central themes that surface in War and Peace and Anna Karenina. Stories as different as 'The Snowstorm'\, 'Lucerne'\, 'The Diary of a Madman'\, and 'The Devil' are grounded in autobiographical experience. They deal with journeys of self-discovery and the moral and religious questioning that characterizes Tolstoy's works of criticism and philosophy. 
'Strider' and 'Father Sergy'\, as well as reflecting Tolstoy's own experiences\, also reveal profound psychological insights. These stories range over much of the Russian world of the nineteenth century\, from the nobility to the peasantry\, the military to the clergy\, from merchants and cobblers to a horse and a tree. Together they present a fascinating picture of Tolstoy's skill and artistry. ABOUT THE SERIES: For over 100 years Oxford World's Classics has made available the widest range of literature from around the globe. Each affordable volume reflects Oxford's commitment to scholarship\, providing the most accurate text plus a wealth of other valuable features\, including expert introductions by leading authorities\, helpful notes to clarify the text\, up-to-date bibliographies for further study\, and much more. +51,4231,The Dispossessed,Ursula K. Le Guin,1974,Harpercollins,4.26,Frequently reissued with the same ISBN\, but with slightly differing bibliographical details. +52,7480,The Hobbit,J. R. R. Tolkien,2012,Mariner Books,4.64,Celebrating 75 years of one of the world's most treasured classics with an all new trade paperback edition. Repackaged with new cover art. 500\,000 first printing. +53,6405,The Hobbit or There and Back Again,J. R. R. Tolkien,2012,Mariner Books,4.63,Celebrating 75 years of one of the world's most treasured classics with an all new trade paperback edition. Repackaged with new cover art. 500\,000 first printing. +54,2540,The Inspector General (Language - Russian) (Russian Edition),[Nicolai Gogol, Thomas Seltzer],2014,CreateSpace,3.50,The Inspector-General is a national institution. To place a purely literary valuation upon it and call it the greatest of Russian comedies would not convey the significance of its position either in Russian literature or in Russian life itself. There is no other single work in the modern literature of any language that carries with it the wealth of associations which the Inspector-General does to the educated Russian. +55,2951,The Insulted and Injured,Fyodor Dostoevsky,2011,Wm. B. Eerdmans Publishing,4.00,The Insulted and Injured\, which came out in 1861\, was Fyodor Dostoevsky's first major work of fiction after his Siberian exile and the first of the long novels that made him famous. Set in nineteenth-century Petersburg\, this gripping novel features a vividly drawn set of characters - including Vanya (Dostoevsky's semi-autobiographical hero)\, Natasha (the woman he loves)\, and Alyosha (Natasha's aristocratic lover) - all suffering from the cruelly selfish machinations of Alyosha's father\, the dark and powerful Prince Valkovsky. Boris Jakim's fresh English-language rendering of this gem in the Dostoevsky canon is both more colorful and more accurate than any earlier translation. --from back cover. +56,2130,The J. R. R. Tolkien Audio Collection,[John Ronald Reuel Tolkien, Christopher Tolkien],2002,HarperCollins Publishers,4.89,For generations\, J R R Tolkien's words have brought to thrilling life a world of hobbits\, magic\, and historic myth\, woken from its foggy slumber within our minds. Here\, he tells the tales in his own voice. +57,9801,The Karamazov Brothers (Oxford World's Classics),Fyodor Dostoevsky,2008,Oxford University Press,4.40,A remarkable work showing the author's power to depict Russian character and his understanding of human nature. Driven by intense\, uncontrollable emotions of rage and revenge\, the four Karamazov brothers all become involved in the brutal murder of their despicable father.
+58,5469,The Lays of Beleriand,[John Ronald Reuel Tolkien, Christopher Tolkien],2002,Harpercollins Pub Limited,4.42,The third volume that contains the early myths and legends which led to the writing of Tolkien's epic tale of war\, The Silmarillion. This\, the third volume of The History of Middle-earth\, gives us a privileged insight into the creation of the mythology of Middle-earth\, through the alliterative verse tales of two of the most crucial stories in Tolkien's world -- those of Turin and Luthien. The first of the poems is the unpublished Lay of The Children of Hurin\, narrating on a grand scale the tragedy of Turin Turambar. The second is the moving Lay of Leithian\, the chief source of the tale of Beren and Luthien in The Silmarillion\, telling of the Quest of the Silmaril and the encounter with Morgoth in his subterranean fortress. Accompanying the poems are commentaries on the evolution of the history of the Elder Days. Also included is the notable criticism of The Lay of Leithian by CS Lewis\, who read the poem in 1929. +59,2675,The Lord of the Rings - Boxed Set,J.R.R. Tolkien,2012,HarperCollins,4.56,This beautiful gift edition of The Hobbit\, J.R.R. Tolkien's classic prelude to his Lord of the Rings trilogy\, features cover art\, illustrations\, and watercolor paintings by the artist Alan Lee. Bilbo Baggins is a hobbit who enjoys a comfortable\, unambitious life\, rarely traveling any farther than his pantry or cellar. But his contentment is disturbed when the wizard Gandalf and a company of dwarves arrive on his doorstep one day to whisk him away on an adventure. They have launched a plot to raid the treasure hoard guarded by Smaug the Magnificent\, a large and very dangerous dragon. Bilbo reluctantly joins their quest\, unaware that on his journey to the Lonely Mountain he will encounter both a magic ring and a frightening creature known as Gollum. Written for J.R.R. Tolkien's own children\, The Hobbit has sold many millions of copies worldwide and established itself as a modern classic. +60,7140,The Lord of the Rings Poster Collection: Six Paintings by Alan Lee (No. 1),[J. R. R. Tolkien, Alan Lee],2002,HarperSport,4.75,A selection of stunning poster paintings from the celebrated Tolkien artist Alan Lee - the man behind many of the striking images from The Lord of The Rings movie. The 50 paintings contained within the centenary edition of The Lord of the Rings in 1992 have themselves become classics and Alan Lee's interpretations are hailed as the most faithful to Tolkien's own vision. This new poster collection\, a perfect complement to volume one\, reproduces six more of the most popular paintings from the book in a format suitable either for hanging as posters or mounting and framing. +61,5127,The Overcoat,Nikolai Gogol,1992,Courier Corporation,3.75,Four short stories include a satirical tale of Russian bureaucrats and a portrayal of an elderly couple living in the secluded countryside. +62,8875,The Two Towers,John Ronald Reuel Tolkien,2007,HarperCollins UK,4.64,The second volume in The Lord of the Rings. This title is also available as a film. +63,4977,The Unvanquished,William Faulkner,2011,Vintage,3.50,Set in Mississippi during the Civil War and Reconstruction\, THE UNVANQUISHED focuses on the Sartoris family\, who\, with their code of personal responsibility and courage\, stand for the best of the Old South's traditions.
+64,4382,The Wolves of Witchmaker,Carole Guinane,2001,iUniverse,5.00,Polly Lavender is mysteriously lured onto Witchmaker's grounds along with her best friends Tony Rico\, Gracie Reene\, and Zeus\, the wolf they rescued as a pup. The three must quickly learn to master the art of magic because they have been chosen to lead Witchmaker Prep against a threat that has grim consequences. +65,7912,The Word For World is Forest,Ursula K. Le Guin,2015,Gollancz,4.22,When the inhabitants of a peaceful world are conquered by the bloodthirsty yumens\, their existence is irrevocably altered. Forced into servitude\, the Athsheans find themselves at the mercy of their brutal masters. Desperation causes the Athsheans\, led by Selver\, to retaliate against their captors\, abandoning their strictures against violence. But in defending their lives\, they have endangered the very foundations of their society. For every blow against the invaders is a blow to the humanity of the Athsheans. And once the killing starts\, there is no turning back. +66,1211,The brothers Karamazov,Fyodor Dostoevsky,2003,Bantam Classics,1.00,In 1880 Dostoevsky completed The Brothers Karamazov\, the literary effort for which he had been preparing all his life. Compelling\, profound\, complex\, it is the story of a patricide and of the four sons who each had a motive for murder: Dmitry\, the sensualist\, Ivan\, the intellectual\, Alyosha\, the mystic\, and twisted\, cunning Smerdyakov\, the bastard child. Frequently lurid\, nightmarish\, always brilliant\, the novel plunges the reader into a sordid love triangle\, a pathological obsession\, and a gripping courtroom drama. But throughout the whole\, Dostoevsky searches for the truth--about man\, about life\, about the existence of God. A terrifying answer to man's eternal questions\, this monumental work remains the crowning achievement of perhaps the finest novelist of all time. From the Paperback edition. +67,8086,The grand inquisitor (Milestones of thought),Fyodor Dostoevsky,1981,A&C Black,4.09,Dostoevsky's portrayal of the Catholic Church during the Inquisition is a plea for the power of pure faith\, and a critique of the tyrannies of institutionalized religion. This is an excerpt from the Brothers Karamazov which stands alone as a statement of philosophy and a warning about the surrender of freedom for the sake of comfort. +68,8077,The unvanquished,William Faulkner,2011,Vintage,4.00,Set in Mississippi during the Civil War and Reconstruction\, THE UNVANQUISHED focuses on the Sartoris family\, who\, with their code of personal responsibility and courage\, stand for the best of the Old South's traditions. +69,8480,The wind's twelve quarters: Short stories,Ursula K. Le Guin,2017,HarperCollins,5.00,The recipient of numerous literary prizes\, including the National Book Award\, the Kafka Award\, and the Pushcart Prize\, Ursula K. Le Guin is renowned for her lyrical writing\, rich characters\, and diverse worlds. The Wind's Twelve Quarters collects seventeen powerful stories\, each with an introduction by the author\, ranging from fantasy to intriguing scientific concepts\, from medieval settings to the future. Including an insightful foreword by Le Guin\, describing her experience\, her inspirations\, and her approach to writing\, this stunning collection explores human values\, relationships\, and survival\, and showcases the myriad talents of one of the most provocative writers of our time.
+70,2847,To Love A Dark Stranger (Lovegram Historical Romance),Colleen Faulkner,1997,Zebra Books,5.00,Bestselling author Colleen Faulkner's tumultuous saga of royal intrigue and forbidden desire sweeps from the magnificent estates of the aristocracy to the shadowy streets of London to King Charles II's glittering Restoration court. +71,3293,Universe by Design,Danny Faulkner,2004,New Leaf Publishing Group,4.25,Views the stars and planets from a creationist standpoint\, addresses common misconceptions and difficulties about relativity and cosmology\, and discusses problems with the big bang theory with many analogies\, examples\, diagrams\, and illustrations. Original. +72,5327,War and Peace,Leo Tolstoy,2016,Lulu.com,3.84,Covering the period from the French invasion under Napoleon into Russia. Although not covering solely the war itself\, the serialized novel does cover the effects the war had on Russian society from the common person right up to the Tsar himself. The book starts to move more to a philosophical consideration on war and peace near the end\, making the book as a whole an important piece of literature. +73,4536,War and Peace (Signet Classics),[Leo Tolstoy, Pat Conroy, John Hockenberry],2012,Signet Classics,4.75,Presents the classical epic of the Napoleonic Wars and their effects on four Russian families. +74,9032,War and Peace: A Novel (6 Volumes),Tolstoy Leo,2013,Hardpress Publishing,3.81,Unlike some other reproductions of classic texts (1) We have not used OCR (Optical Character Recognition)\, as this leads to bad quality books with introduced typos. (2) In books where there are images such as portraits\, maps\, sketches etc We have endeavoured to keep the quality of these images\, so they represent accurately the original artefact. Although occasionally there may be certain imperfections with these old texts\, we feel they deserve to be made available for future generations to enjoy. +75,5119,William Faulkner,William Faulkner,2011,Vintage,4.00,This invaluable volume\, which has been republished to commemorate the one-hundredth anniversary of Faulkner's birth\, contains some of the greatest short fiction by a writer who defined the course of American literature. Its forty-five stories fall into three categories: those not included in Faulkner's earlier collections\, previously unpublished short fiction\, and stories that were later expanded into such novels as The Unvanquished\, The Hamlet\, and Go Down\, Moses. With its Introduction and extensive notes by the biographer Joseph Blotner\, Uncollected Stories of William Faulkner is an essential addition to its author's canon--as well as a book of some of the most haunting\, harrowing\, and atmospheric short fiction written in the twentieth century. +76,8615,Winter notes on summer impressions,Fyodor Dostoevsky,2018,Alma Books,4.75,In June 1862\, Dostoevsky left Petersburg on his first excursion to Western Europe. Ostensibly making the trip to consult Western specialists about his epilepsy\, he also wished to see first-hand the source of the Western ideas he believed were corrupting Russia. Over the course of his journey he visited a number of major cities\, including Berlin\, Paris\, London\, Florence\, Milan and Vienna. His record of the trip\, Winter Notes on Summer Impressions - first published in the February 1863 issue of Vremya\, the periodical he edited - is the chrysalis out of which many elements of his later masterpieces developed.
+77,6478,Woman-The Full Story: A Dynamic Celebration of Freedoms,Michele Guinness,2003,Zondervan,5.00,What does it mean to be a woman today? What have women inherited from their radical\, risk-taking sisters of the past? And how does God view this half of humanity? Michele Guinness invites us on an adventure of discovery\, exploring the biblical texts\, the annals of history and the experiences of women today in search of the challenges and achievements\, failures and joys\, of women throughout the ages. +78,8678,Worlds of Exile and Illusion: Three Complete Novels of the Hainish Series in One Volume--Rocannon's World\, Planet of Exile\, City of Illusions,Ursula K. Le Guin,2016,Orb Books,4.41,Worlds of Exile and Illusion contains three novels in the Hainish Series from Ursula K. Le Guin\, one of the greatest science fiction writers and many times the winner of the Hugo and Nebula Awards. Her career as a novelist was launched by the three novels contained here. These books\, Rocannon's World\, Planet of Exile\, and City of Illusions\, are set in the same universe as Le Guin's groundbreaking classic\, The Left Hand of Darkness. At the Publisher's request\, this title is being sold without Digital Rights Management Software (DRM) applied. diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/bucket.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/bucket.csv-spec index b29c489910f65..8cfde2bb9bde7 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/bucket.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/bucket.csv-spec @@ -145,6 +145,24 @@ AVG(salary):double | bucket:date // end::bucket_in_agg-result[] ; +bucketWithOffset#[skip:-8.13.99, reason:BUCKET renamed in 8.14] +// tag::bucketWithOffset[] +FROM employees +| STATS dates = MV_SORT(VALUES(birth_date)) BY b = BUCKET(birth_date + 1 HOUR, 1 YEAR) - 1 HOUR +| EVAL d_count = MV_COUNT(dates) +| SORT d_count, b +| LIMIT 3 +// end::bucketWithOffset[] +; + +// tag::bucketWithOffset-result[] +dates:date |b:date |d_count:integer +1965-01-03T00:00:00.000Z |1964-12-31T23:00:00.000Z|1 +[1955-01-21T00:00:00.000Z, 1955-08-20T00:00:00.000Z, 1955-08-28T00:00:00.000Z, 1955-10-04T00:00:00.000Z]|1954-12-31T23:00:00.000Z|4 +[1957-04-04T00:00:00.000Z, 1957-05-23T00:00:00.000Z, 1957-05-25T00:00:00.000Z, 1957-12-03T00:00:00.000Z]|1956-12-31T23:00:00.000Z|4 +// end::bucketWithOffset-result[] +; + docsBucketMonth#[skip:-8.13.99, reason:BUCKET renamed in 8.14] //tag::docsBucketMonth[] FROM employees diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date_nanos.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date_nanos.csv-spec index f4b5c98d596ae..4206d6b48699f 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date_nanos.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date_nanos.csv-spec @@ -216,6 +216,137 @@ millis:date | nanos:date_nanos | num:long 2023-10-23T13:33:34.937Z | 2023-10-23T13:33:34.937193000Z | 1698068014937193000 ; +implicit casting to nanos, date only +required_capability: date_nanos_type +required_capability: date_nanos_implicit_casting + +FROM date_nanos +| WHERE MV_MIN(nanos) > "2023-10-23" +| SORT nanos DESC +| KEEP millis, nanos; + +millis:date | nanos:date_nanos +2023-10-23T13:55:01.543Z | 2023-10-23T13:55:01.543123456Z +2023-10-23T13:53:55.832Z | 2023-10-23T13:53:55.832987654Z +2023-10-23T13:52:55.015Z | 2023-10-23T13:52:55.015787878Z +2023-10-23T13:51:54.732Z | 2023-10-23T13:51:54.732102837Z 
+2023-10-23T13:33:34.937Z | 2023-10-23T13:33:34.937193000Z +2023-10-23T12:27:28.948Z | 2023-10-23T12:27:28.948000000Z +2023-10-23T12:15:03.360Z | 2023-10-23T12:15:03.360103847Z +2023-10-23T12:15:03.360Z | 2023-10-23T12:15:03.360103847Z +; + +implicit casting to nanos, date only, equality test +required_capability: date_nanos_type +required_capability: date_nanos_implicit_casting + +FROM date_nanos +| WHERE MV_MIN(nanos) == "2023-10-23" +| SORT nanos DESC +| KEEP millis, nanos; + +millis:date | nanos:date_nanos +; + + +implicit casting to nanos, date plus time to seconds +required_capability: date_nanos_type +required_capability: date_nanos_implicit_casting + +FROM date_nanos +| WHERE MV_MIN(nanos) > "2023-10-23T00:00:00" +| SORT nanos DESC +| KEEP millis, nanos; + +millis:date | nanos:date_nanos +2023-10-23T13:55:01.543Z | 2023-10-23T13:55:01.543123456Z +2023-10-23T13:53:55.832Z | 2023-10-23T13:53:55.832987654Z +2023-10-23T13:52:55.015Z | 2023-10-23T13:52:55.015787878Z +2023-10-23T13:51:54.732Z | 2023-10-23T13:51:54.732102837Z +2023-10-23T13:33:34.937Z | 2023-10-23T13:33:34.937193000Z +2023-10-23T12:27:28.948Z | 2023-10-23T12:27:28.948000000Z +2023-10-23T12:15:03.360Z | 2023-10-23T12:15:03.360103847Z +2023-10-23T12:15:03.360Z | 2023-10-23T12:15:03.360103847Z +; + +implicit casting to nanos, date plus time to seconds, equality test +required_capability: date_nanos_type +required_capability: date_nanos_implicit_casting + +FROM date_nanos +| WHERE MV_MIN(nanos) == "2023-10-23T12:27:28" +| SORT nanos DESC +| KEEP millis, nanos; + +millis:date | nanos:date_nanos +; + +implicit casting to nanos, date plus time to millis +required_capability: date_nanos_type +required_capability: date_nanos_implicit_casting + +FROM date_nanos +| WHERE MV_MIN(nanos) > "2023-10-23T00:00:00.000" +| SORT nanos DESC +| KEEP millis, nanos; + +millis:date | nanos:date_nanos +2023-10-23T13:55:01.543Z | 2023-10-23T13:55:01.543123456Z +2023-10-23T13:53:55.832Z | 2023-10-23T13:53:55.832987654Z +2023-10-23T13:52:55.015Z | 2023-10-23T13:52:55.015787878Z +2023-10-23T13:51:54.732Z | 2023-10-23T13:51:54.732102837Z +2023-10-23T13:33:34.937Z | 2023-10-23T13:33:34.937193000Z +2023-10-23T12:27:28.948Z | 2023-10-23T12:27:28.948000000Z +2023-10-23T12:15:03.360Z | 2023-10-23T12:15:03.360103847Z +2023-10-23T12:15:03.360Z | 2023-10-23T12:15:03.360103847Z +; + +implicit casting to nanos, date plus time to millis, equality test +required_capability: date_nanos_type +required_capability: date_nanos_implicit_casting + +FROM date_nanos +| WHERE MV_MIN(nanos) == "2023-10-23T12:27:28.948" +| SORT nanos DESC +| KEEP millis, nanos; + +millis:date | nanos:date_nanos +2023-10-23T12:27:28.948Z | 2023-10-23T12:27:28.948000000Z +; + +implicit casting to nanos, date plus time to nanos +required_capability: date_nanos_type +required_capability: date_nanos_implicit_casting + +FROM date_nanos +| WHERE MV_MIN(nanos) > "2023-10-23T00:00:00.000000000" +| SORT nanos DESC +| KEEP millis, nanos; + +millis:date | nanos:date_nanos +2023-10-23T13:55:01.543Z | 2023-10-23T13:55:01.543123456Z +2023-10-23T13:53:55.832Z | 2023-10-23T13:53:55.832987654Z +2023-10-23T13:52:55.015Z | 2023-10-23T13:52:55.015787878Z +2023-10-23T13:51:54.732Z | 2023-10-23T13:51:54.732102837Z +2023-10-23T13:33:34.937Z | 2023-10-23T13:33:34.937193000Z +2023-10-23T12:27:28.948Z | 2023-10-23T12:27:28.948000000Z +2023-10-23T12:15:03.360Z | 2023-10-23T12:15:03.360103847Z +2023-10-23T12:15:03.360Z | 2023-10-23T12:15:03.360103847Z +; + +implicit casting to nanos, date plus time to nanos, equality 
test +required_capability: date_nanos_type +required_capability: date_nanos_implicit_casting + +FROM date_nanos +| WHERE MV_MIN(nanos) == "2023-10-23T12:27:28.948000000" +| SORT nanos DESC +| KEEP millis, nanos; + +millis:date | nanos:date_nanos +2023-10-23T12:27:28.948Z | 2023-10-23T12:27:28.948000000Z +; + date nanos greater than millis required_capability: date_nanos_type required_capability: date_nanos_compare_to_millis @@ -555,7 +686,8 @@ required_capability: date_nanos_bucket FROM date_nanos | WHERE millis > "2020-01-01" -| STATS ct = count(*) BY yr = BUCKET(nanos, 1 year); +| STATS ct = count(*) BY yr = BUCKET(nanos, 1 year) +| SORT yr DESC; ct:long | yr:date_nanos 8 | 2023-01-01T00:00:00.000000000Z @@ -567,7 +699,8 @@ required_capability: date_nanos_bucket FROM date_nanos | WHERE millis > "2020-01-01" -| STATS ct = count(*) BY yr = BUCKET(nanos, 5, "1999-01-01", NOW()); +| STATS ct = count(*) BY yr = BUCKET(nanos, 5, "1999-01-01", NOW()) +| SORT yr DESC; ct:long | yr:date_nanos 8 | 2023-01-01T00:00:00.000000000Z @@ -579,7 +712,8 @@ required_capability: date_nanos_bucket FROM date_nanos | WHERE millis > "2020-01-01" -| STATS ct = count(*) BY mo = BUCKET(nanos, 1 month); +| STATS ct = count(*) BY mo = BUCKET(nanos, 1 month) +| SORT mo DESC; ct:long | mo:date_nanos 8 | 2023-10-01T00:00:00.000000000Z @@ -591,7 +725,8 @@ required_capability: date_nanos_bucket FROM date_nanos | WHERE millis > "2020-01-01" -| STATS ct = count(*) BY mo = BUCKET(nanos, 20, "2023-01-01", "2023-12-31"); +| STATS ct = count(*) BY mo = BUCKET(nanos, 20, "2023-01-01", "2023-12-31") +| SORT mo DESC; ct:long | mo:date_nanos 8 | 2023-10-01T00:00:00.000000000Z @@ -603,18 +738,21 @@ required_capability: date_nanos_bucket FROM date_nanos | WHERE millis > "2020-01-01" -| STATS ct = count(*) BY mo = BUCKET(nanos, 55, "2023-01-01", "2023-12-31"); +| STATS ct = count(*) BY mo = BUCKET(nanos, 55, "2023-01-01", "2023-12-31") +| SORT mo DESC; ct:long | mo:date_nanos 8 | 2023-10-23T00:00:00.000000000Z ; + Bucket Date nanos by 10 minutes required_capability: date_trunc_date_nanos required_capability: date_nanos_bucket FROM date_nanos | WHERE millis > "2020-01-01" -| STATS ct = count(*) BY mn = BUCKET(nanos, 10 minutes); +| STATS ct = count(*) BY mn = BUCKET(nanos, 10 minutes) +| SORT mn DESC; ct:long | mn:date_nanos 4 | 2023-10-23T13:50:00.000000000Z diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/hash.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/hash.csv-spec new file mode 100644 index 0000000000000..fcac1e1859c6d --- /dev/null +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/hash.csv-spec @@ -0,0 +1,105 @@ +hash +required_capability: hash_function + +FROM sample_data +| WHERE message != "Connection error" +| EVAL md5 = hash("md5", message), sha256 = hash("sha256", message) +| KEEP message, md5, sha256; +ignoreOrder:true + +message:keyword | md5:keyword | sha256:keyword +Connected to 10.1.0.1 | abd7d1ce2bb636842a29246b3512dcae | 6d8372129ad78770f7185554dd39864749a62690216460752d6c075fa38ad85c +Connected to 10.1.0.2 | 8f8f1cb60832d153f5b9ec6dc828b93f | b0db24720f15857091b3c99f4c4833586d0ea3229911b8777efb8d917cf27e9a +Connected to 10.1.0.3 | 912b6dc13503165a15de43304bb77c78 | 75b0480188db8acc4d5cc666a51227eb2bc5b989cd8ca912609f33e0846eff57 +Disconnected | ef70e46fd3bbc21e3e1f0b6815e750c0 | 04dfac3671b494ad53fcd152f7a14511bfb35747278aad8ce254a0d6e4ba4718 +; + + +hashOfConvertedType +required_capability: hash_function + +FROM sample_data +| WHERE message != "Connection error" +| 
EVAL input = event_duration::STRING, md5 = hash("md5", input), sha256 = hash("sha256", input) +| KEEP message, input, md5, sha256; +ignoreOrder:true + +message:keyword | input:keyword | md5:keyword | sha256:keyword +Connected to 10.1.0.1 | 1756467 | c4fc1c57ee9b1d2b2023b70c8c167b54 | 8376a50a7ba7e6bd1bf9ad0c32d27d2f49fd0fa422573f98f239e21048b078f3 +Connected to 10.1.0.2 | 2764889 | 8e8cf005e11a7b5df1d9478a4715a444 | 1031f2bef8eaecbf47319505422300b27ea1f7c38b6717d41332325062f9a56a +Connected to 10.1.0.3 | 3450233 | 09f2c64f5a55e9edf8ffbad336b561d8 | f77d7545769c4ecc85092f4f0b7ec8c20f467e4beb15fe67ca29f9aa8e9a6900 +Disconnected | 1232382 | 6beac1485638d51e13c2c53990a2f611 | 9a03c1274a3ebb6c1cb85d170ce0a6fdb9d2232724e06b9f5e7cb9274af3cad6 +; + + +hashOfEmptyInput +required_capability: hash_function + +ROW input="" | EVAL md5 = hash("md5", input), sha256 = hash("sha256", input); + +input:keyword | md5:keyword | sha256:keyword + | d41d8cd98f00b204e9800998ecf8427e | e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 +; + +hashOfNullInput +required_capability: hash_function + +ROW input=null::STRING | EVAL md5 = hash("md5", input), sha256 = hash("sha256", input); + +input:keyword | md5:keyword | sha256:keyword +null | null | null +; + + +hashWithNullAlgorithm +required_capability: hash_function + +ROW input="input" | EVAL hash = hash(null, input); + +input:keyword | hash:keyword +input | null +; + + +hashWithMv +required_capability: hash_function + +ROW input=["foo", "bar"] | mv_expand input | EVAL md5 = hash("md5", input), sha256 = hash("sha256", input); + +input:keyword | md5:keyword | sha256:keyword +foo | acbd18db4cc2f85cedef654fccc4a4d8 | 2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae +bar | 37b51d194a7513e45b56f6524f2d51f2 | fcde2b2edba56bf408601fb721fe9b5c338d10ee429ea04fae5511b68fbf8fb9 +; + + +hashWithNestedFunctions +required_capability: hash_function + +ROW input=["foo", "bar"] | EVAL hash = concat(hash("md5", mv_concat(input, "-")), "-", hash("sha256", mv_concat(input, "-"))); + +input:keyword | hash:keyword +["foo", "bar"] | e5f9ec048d1dbe19c70f720e002f9cb1-7d89c4f517e3bd4b5e8e76687937005b602ea00c5cba3e25ef1fc6575a55103e +; + + +hashWithConvertedTypes +required_capability: hash_function + +ROW input=42 | EVAL md5 = hash("md5", input::STRING), sha256 = hash("sha256", to_string(input)); + +input:integer | md5:keyword | sha256:keyword +42 | a1d0c6e83f027327d8461063f4ac58a6 | 73475cb40a568e8da8a045ced110137e159f890ac4da883b6b17dc651b3a8049 +; + + +hashWithStats +required_capability: hash_function + +FROM sample_data +| EVAL md5="md5" +| STATS count = count(*) by hash(md5, message) +| WHERE count > 1; + +count:long | hash(md5, message):keyword +3 | 2e92ae79ff32b37fee4368a594792183 +; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/lookup-join.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/lookup-join.csv-spec index 8bcc2c2ff3502..7d4f89ed920a9 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/lookup-join.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/lookup-join.csv-spec @@ -8,7 +8,7 @@ ############################################### basicOnTheDataNode -required_capability: join_lookup_v7 +required_capability: join_lookup_v8 FROM employees | EVAL language_code = languages @@ -25,7 +25,7 @@ emp_no:integer | language_code:integer | language_name:keyword ; basicRow -required_capability: join_lookup_v7 +required_capability: join_lookup_v8 ROW language_code = 1 | LOOKUP JOIN 
languages_lookup ON language_code @@ -36,7 +36,7 @@ language_code:integer | language_name:keyword ; basicOnTheCoordinator -required_capability: join_lookup_v7 +required_capability: join_lookup_v8 FROM employees | SORT emp_no @@ -53,7 +53,7 @@ emp_no:integer | language_code:integer | language_name:keyword ; subsequentEvalOnTheDataNode -required_capability: join_lookup_v7 +required_capability: join_lookup_v8 FROM employees | EVAL language_code = languages @@ -71,7 +71,7 @@ emp_no:integer | language_code:integer | language_name:keyword | language_code_x ; subsequentEvalOnTheCoordinator -required_capability: join_lookup_v7 +required_capability: join_lookup_v8 FROM employees | SORT emp_no @@ -89,7 +89,7 @@ emp_no:integer | language_code:integer | language_name:keyword | language_code_x ; sortEvalBeforeLookup -required_capability: join_lookup_v7 +required_capability: join_lookup_v8 FROM employees | SORT emp_no @@ -106,7 +106,7 @@ emp_no:integer | language_code:integer | language_name:keyword ; nonUniqueLeftKeyOnTheDataNode -required_capability: join_lookup_v7 +required_capability: join_lookup_v8 FROM employees | WHERE emp_no <= 10030 @@ -130,7 +130,7 @@ emp_no:integer | language_code:integer | language_name:keyword ; nonUniqueRightKeyOnTheDataNode -required_capability: join_lookup_v7 +required_capability: join_lookup_v8 FROM employees | EVAL language_code = emp_no % 10 @@ -150,7 +150,7 @@ emp_no:integer | language_code:integer | language_name:keyword | country:k ; nonUniqueRightKeyOnTheCoordinator -required_capability: join_lookup_v7 +required_capability: join_lookup_v8 FROM employees | SORT emp_no @@ -170,7 +170,7 @@ emp_no:integer | language_code:integer | language_name:keyword | country:k ; nonUniqueRightKeyFromRow -required_capability: join_lookup_v7 +required_capability: join_lookup_v8 ROW language_code = 2 | LOOKUP JOIN languages_lookup_non_unique_key ON language_code @@ -182,12 +182,28 @@ language_code:integer | language_name:keyword | country:keyword 2 | [German, German, German] | [Austria, Germany, Switzerland] ; +repeatedIndexOnFrom +required_capability: join_lookup_v8 +required_capability: join_lookup_repeated_index_from + +FROM languages_lookup +| LOOKUP JOIN languages_lookup ON language_code +| SORT language_code +; + +language_code:integer | language_name:keyword +1 | English +2 | French +3 | Spanish +4 | German +; + ############################################### # Filtering tests with languages_lookup index ############################################### filterOnLeftSide -required_capability: join_lookup_v7 +required_capability: join_lookup_v8 FROM employees | EVAL language_code = languages @@ -204,7 +220,7 @@ emp_no:integer | language_code:integer | language_name:keyword ; filterOnRightSide -required_capability: join_lookup_v7 +required_capability: join_lookup_v8 FROM sample_data | LOOKUP JOIN message_types_lookup ON message @@ -220,7 +236,7 @@ FROM sample_data ; filterOnRightSideAfterStats -required_capability: join_lookup_v7 +required_capability: join_lookup_v8 FROM sample_data | LOOKUP JOIN message_types_lookup ON message @@ -233,7 +249,7 @@ count:long | type:keyword ; filterOnJoinKey -required_capability: join_lookup_v7 +required_capability: join_lookup_v8 FROM employees | EVAL language_code = languages @@ -248,7 +264,7 @@ emp_no:integer | language_code:integer | language_name:keyword ; filterOnJoinKeyAndRightSide -required_capability: join_lookup_v7 +required_capability: join_lookup_v8 FROM employees | WHERE emp_no < 10006 @@ -265,7 +281,7 @@ emp_no:integer | 
language_code:integer | language_name:keyword ; filterOnRightSideOnTheCoordinator -required_capability: join_lookup_v7 +required_capability: join_lookup_v8 FROM employees | SORT emp_no @@ -281,7 +297,7 @@ emp_no:integer | language_code:integer | language_name:keyword ; filterOnJoinKeyOnTheCoordinator -required_capability: join_lookup_v7 +required_capability: join_lookup_v8 FROM employees | SORT emp_no @@ -297,7 +313,7 @@ emp_no:integer | language_code:integer | language_name:keyword ; filterOnJoinKeyAndRightSideOnTheCoordinator -required_capability: join_lookup_v7 +required_capability: join_lookup_v8 FROM employees | SORT emp_no @@ -314,7 +330,7 @@ emp_no:integer | language_code:integer | language_name:keyword ; filterOnTheDataNodeThenFilterOnTheCoordinator -required_capability: join_lookup_v7 +required_capability: join_lookup_v8 FROM employees | EVAL language_code = languages @@ -335,7 +351,7 @@ emp_no:integer | language_code:integer | language_name:keyword ########################################################################### nullJoinKeyOnTheDataNode -required_capability: join_lookup_v7 +required_capability: join_lookup_v8 FROM employees | WHERE emp_no < 10004 @@ -352,7 +368,7 @@ emp_no:integer | language_code:integer | language_name:keyword ; mvJoinKeyOnTheDataNode -required_capability: join_lookup_v7 +required_capability: join_lookup_v8 FROM employees | WHERE 10003 < emp_no AND emp_no < 10008 @@ -370,7 +386,7 @@ emp_no:integer | language_code:integer | language_name:keyword ; mvJoinKeyFromRow -required_capability: join_lookup_v7 +required_capability: join_lookup_v8 ROW language_code = [4, 5, 6, 7] | LOOKUP JOIN languages_lookup_non_unique_key ON language_code @@ -383,7 +399,7 @@ language_code:integer | language_name:keyword | country:keyword ; mvJoinKeyFromRowExpanded -required_capability: join_lookup_v7 +required_capability: join_lookup_v8 ROW language_code = [4, 5, 6, 7, 8] | MV_EXPAND language_code @@ -405,7 +421,7 @@ language_code:integer | language_name:keyword | country:keyword ############################################### lookupIPFromRow -required_capability: join_lookup_v7 +required_capability: join_lookup_v8 ROW left = "left", client_ip = "172.21.0.5", right = "right" | LOOKUP JOIN clientips_lookup ON client_ip @@ -416,7 +432,7 @@ left | 172.21.0.5 | right | Development ; lookupIPFromKeepRow -required_capability: join_lookup_v7 +required_capability: join_lookup_v8 ROW left = "left", client_ip = "172.21.0.5", right = "right" | KEEP left, client_ip, right @@ -428,7 +444,7 @@ left | 172.21.0.5 | right | Development ; lookupIPFromRowWithShadowing -required_capability: join_lookup_v7 +required_capability: join_lookup_v8 ROW left = "left", client_ip = "172.21.0.5", env = "env", right = "right" | LOOKUP JOIN clientips_lookup ON client_ip @@ -439,7 +455,7 @@ left | 172.21.0.5 | right | Development ; lookupIPFromRowWithShadowingKeep -required_capability: join_lookup_v7 +required_capability: join_lookup_v8 ROW left = "left", client_ip = "172.21.0.5", env = "env", right = "right" | EVAL client_ip = client_ip::keyword @@ -452,7 +468,7 @@ left | 172.21.0.5 | right | Development ; lookupIPFromRowWithShadowingKeepReordered -required_capability: join_lookup_v7 +required_capability: join_lookup_v8 ROW left = "left", client_ip = "172.21.0.5", env = "env", right = "right" | EVAL client_ip = client_ip::keyword @@ -465,7 +481,7 @@ right | Development | 172.21.0.5 ; lookupIPFromIndex -required_capability: join_lookup_v7 +required_capability: join_lookup_v8 FROM sample_data | EVAL client_ip = 
client_ip::keyword @@ -484,7 +500,7 @@ ignoreOrder:true ; lookupIPFromIndexKeep -required_capability: join_lookup_v7 +required_capability: join_lookup_v8 FROM sample_data | EVAL client_ip = client_ip::keyword @@ -504,7 +520,7 @@ ignoreOrder:true ; lookupIPFromIndexKeepKeep -required_capability: join_lookup_v7 +required_capability: join_lookup_v8 FROM sample_data | KEEP client_ip, event_duration, @timestamp, message @@ -526,7 +542,7 @@ timestamp:date | client_ip:keyword | event_duration:long | msg:keyword ; lookupIPFromIndexStats -required_capability: join_lookup_v7 +required_capability: join_lookup_v8 FROM sample_data | EVAL client_ip = client_ip::keyword @@ -542,7 +558,7 @@ count:long | env:keyword ; lookupIPFromIndexStatsKeep -required_capability: join_lookup_v7 +required_capability: join_lookup_v8 FROM sample_data | EVAL client_ip = client_ip::keyword @@ -559,7 +575,7 @@ count:long | env:keyword ; statsAndLookupIPFromIndex -required_capability: join_lookup_v7 +required_capability: join_lookup_v8 FROM sample_data | EVAL client_ip = client_ip::keyword @@ -580,7 +596,7 @@ count:long | client_ip:keyword | env:keyword ############################################### lookupMessageFromRow -required_capability: join_lookup_v7 +required_capability: join_lookup_v8 ROW left = "left", message = "Connected to 10.1.0.1", right = "right" | LOOKUP JOIN message_types_lookup ON message @@ -591,7 +607,7 @@ left | Connected to 10.1.0.1 | right | Success ; lookupMessageFromKeepRow -required_capability: join_lookup_v7 +required_capability: join_lookup_v8 ROW left = "left", message = "Connected to 10.1.0.1", right = "right" | KEEP left, message, right @@ -603,7 +619,7 @@ left | Connected to 10.1.0.1 | right | Success ; lookupMessageFromRowWithShadowing -required_capability: join_lookup_v7 +required_capability: join_lookup_v8 ROW left = "left", message = "Connected to 10.1.0.1", type = "unknown", right = "right" | LOOKUP JOIN message_types_lookup ON message @@ -614,7 +630,7 @@ left | Connected to 10.1.0.1 | right | Success ; lookupMessageFromRowWithShadowingKeep -required_capability: join_lookup_v7 +required_capability: join_lookup_v8 ROW left = "left", message = "Connected to 10.1.0.1", type = "unknown", right = "right" | LOOKUP JOIN message_types_lookup ON message @@ -626,7 +642,7 @@ left | Connected to 10.1.0.1 | right | Success ; lookupMessageFromIndex -required_capability: join_lookup_v7 +required_capability: join_lookup_v8 FROM sample_data | LOOKUP JOIN message_types_lookup ON message @@ -644,7 +660,7 @@ ignoreOrder:true ; lookupMessageFromIndexKeep -required_capability: join_lookup_v7 +required_capability: join_lookup_v8 FROM sample_data | LOOKUP JOIN message_types_lookup ON message @@ -663,7 +679,7 @@ ignoreOrder:true ; lookupMessageFromIndexKeepKeep -required_capability: join_lookup_v7 +required_capability: join_lookup_v8 FROM sample_data | KEEP client_ip, event_duration, @timestamp, message @@ -683,7 +699,7 @@ ignoreOrder:true ; lookupMessageFromIndexKeepReordered -required_capability: join_lookup_v7 +required_capability: join_lookup_v8 FROM sample_data | LOOKUP JOIN message_types_lookup ON message @@ -702,7 +718,7 @@ Success | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 ; lookupMessageFromIndexStats -required_capability: join_lookup_v7 +required_capability: join_lookup_v8 FROM sample_data | LOOKUP JOIN message_types_lookup ON message @@ -717,7 +733,7 @@ count:long | type:keyword ; lookupMessageFromIndexStatsKeep -required_capability: join_lookup_v7 +required_capability: join_lookup_v8 FROM 
sample_data | LOOKUP JOIN message_types_lookup ON message @@ -733,7 +749,7 @@ count:long | type:keyword ; statsAndLookupMessageFromIndex -required_capability: join_lookup_v7 +required_capability: join_lookup_v8 FROM sample_data | STATS count = count(message) BY message @@ -751,7 +767,7 @@ count:long | type:keyword | message:keyword ; lookupMessageFromIndexTwice -required_capability: join_lookup_v7 +required_capability: join_lookup_v8 FROM sample_data | LOOKUP JOIN message_types_lookup ON message @@ -773,7 +789,7 @@ ignoreOrder:true ; lookupMessageFromIndexTwiceKeep -required_capability: join_lookup_v7 +required_capability: join_lookup_v8 FROM sample_data | LOOKUP JOIN message_types_lookup ON message @@ -800,7 +816,7 @@ ignoreOrder:true ############################################### lookupIPAndMessageFromRow -required_capability: join_lookup_v7 +required_capability: join_lookup_v8 ROW left = "left", client_ip = "172.21.0.5", message = "Connected to 10.1.0.1", right = "right" | LOOKUP JOIN clientips_lookup ON client_ip @@ -812,7 +828,7 @@ left | 172.21.0.5 | Connected to 10.1.0.1 | right | Devel ; lookupIPAndMessageFromRowKeepBefore -required_capability: join_lookup_v7 +required_capability: join_lookup_v8 ROW left = "left", client_ip = "172.21.0.5", message = "Connected to 10.1.0.1", right = "right" | KEEP left, client_ip, message, right @@ -825,7 +841,7 @@ left | 172.21.0.5 | Connected to 10.1.0.1 | right | Devel ; lookupIPAndMessageFromRowKeepBetween -required_capability: join_lookup_v7 +required_capability: join_lookup_v8 ROW left = "left", client_ip = "172.21.0.5", message = "Connected to 10.1.0.1", right = "right" | LOOKUP JOIN clientips_lookup ON client_ip @@ -838,7 +854,7 @@ left | 172.21.0.5 | Connected to 10.1.0.1 | right | Devel ; lookupIPAndMessageFromRowKeepAfter -required_capability: join_lookup_v7 +required_capability: join_lookup_v8 ROW left = "left", client_ip = "172.21.0.5", message = "Connected to 10.1.0.1", right = "right" | LOOKUP JOIN clientips_lookup ON client_ip @@ -851,7 +867,7 @@ left | 172.21.0.5 | Connected to 10.1.0.1 | right | Devel ; lookupIPAndMessageFromRowWithShadowing -required_capability: join_lookup_v7 +required_capability: join_lookup_v8 ROW left = "left", client_ip = "172.21.0.5", message = "Connected to 10.1.0.1", env = "env", type = "type", right = "right" | LOOKUP JOIN clientips_lookup ON client_ip @@ -863,7 +879,7 @@ left | 172.21.0.5 | Connected to 10.1.0.1 | right | Devel ; lookupIPAndMessageFromRowWithShadowingKeep -required_capability: join_lookup_v7 +required_capability: join_lookup_v8 ROW left = "left", client_ip = "172.21.0.5", message = "Connected to 10.1.0.1", env = "env", right = "right" | EVAL client_ip = client_ip::keyword @@ -877,7 +893,7 @@ left | 172.21.0.5 | Connected to 10.1.0.1 | right | Devel ; lookupIPAndMessageFromRowWithShadowingKeepKeep -required_capability: join_lookup_v7 +required_capability: join_lookup_v8 ROW left = "left", client_ip = "172.21.0.5", message = "Connected to 10.1.0.1", env = "env", right = "right" | EVAL client_ip = client_ip::keyword @@ -892,7 +908,7 @@ left | 172.21.0.5 | Connected to 10.1.0.1 | right | Devel ; lookupIPAndMessageFromRowWithShadowingKeepKeepKeep -required_capability: join_lookup_v7 +required_capability: join_lookup_v8 ROW left = "left", client_ip = "172.21.0.5", message = "Connected to 10.1.0.1", env = "env", right = "right" | EVAL client_ip = client_ip::keyword @@ -908,7 +924,7 @@ left | 172.21.0.5 | Connected to 10.1.0.1 | right | Devel ; 
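// Editorial note: every test in this section follows the same LOOKUP JOIN shape:
// derive or keep a join key, enrich each row from a lookup-mode index, then filter,
// project, or aggregate. A minimal sketch, reusing the languages_lookup index and
// employees dataset these specs already rely on (an illustration, not an additional
// test case with expected results):
//
//   FROM employees
//   | EVAL language_code = languages            // derive the join key
//   | LOOKUP JOIN languages_lookup ON language_code
//   | KEEP emp_no, language_code, language_name
//
// Rows whose key finds no match in the lookup index come back with a null
// language_name, as the null-key and non-unique-key tests above demonstrate.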
lookupIPAndMessageFromRowWithShadowingKeepReordered -required_capability: join_lookup_v7 +required_capability: join_lookup_v8 ROW left = "left", client_ip = "172.21.0.5", message = "Connected to 10.1.0.1", env = "env", right = "right" | EVAL client_ip = client_ip::keyword @@ -922,7 +938,7 @@ right | Development | Success | 172.21.0.5 ; lookupIPAndMessageFromIndex -required_capability: join_lookup_v7 +required_capability: join_lookup_v8 FROM sample_data | EVAL client_ip = client_ip::keyword @@ -942,7 +958,7 @@ ignoreOrder:true ; lookupIPAndMessageFromIndexKeep -required_capability: join_lookup_v7 +required_capability: join_lookup_v8 FROM sample_data | EVAL client_ip = client_ip::keyword @@ -963,7 +979,7 @@ ignoreOrder:true ; lookupIPAndMessageFromIndexStats -required_capability: join_lookup_v7 +required_capability: join_lookup_v8 FROM sample_data | EVAL client_ip = client_ip::keyword @@ -981,7 +997,7 @@ count:long | env:keyword | type:keyword ; lookupIPAndMessageFromIndexStatsKeep -required_capability: join_lookup_v7 +required_capability: join_lookup_v8 FROM sample_data | EVAL client_ip = client_ip::keyword @@ -1000,7 +1016,7 @@ count:long | env:keyword | type:keyword ; statsAndLookupIPAndMessageFromIndex -required_capability: join_lookup_v7 +required_capability: join_lookup_v8 FROM sample_data | EVAL client_ip = client_ip::keyword @@ -1019,7 +1035,7 @@ count:long | client_ip:keyword | message:keyword | env:keyword | type:keyw ; lookupIPAndMessageFromIndexChainedEvalKeep -required_capability: join_lookup_v7 +required_capability: join_lookup_v8 FROM sample_data | EVAL client_ip = client_ip::keyword @@ -1041,7 +1057,7 @@ ignoreOrder:true ; lookupIPAndMessageFromIndexChainedRenameKeep -required_capability: join_lookup_v7 +required_capability: join_lookup_v8 FROM sample_data | EVAL client_ip = client_ip::keyword @@ -1061,4 +1077,3 @@ ignoreOrder:true 2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 | QA | null 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | QA | null ; - diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/match-function.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/match-function.csv-spec index 03b24555dbeff..6c9a6fed3853c 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/match-function.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/match-function.csv-spec @@ -115,6 +115,80 @@ book_no:keyword | title:text 7140 |The Lord of the Rings Poster Collection: Six Paintings by Alan Lee (No. 
1) ; +matchWithDisjunction +required_capability: match_function +required_capability: full_text_functions_disjunctions + +from books +| where match(author, "Vonnegut") or match(author, "Guinane") +| keep book_no, author; +ignoreOrder:true + +book_no:keyword | author:text +2464 | Kurt Vonnegut +6970 | Edith Vonnegut +8956 | Kurt Vonnegut +3950 | Kurt Vonnegut +4382 | Carole Guinane +; + +matchWithDisjunctionAndFiltersConjunction +required_capability: match_function +required_capability: full_text_functions_disjunctions + +from books +| where (match(author, "Vonnegut") or match(author, "Guinane")) and year > 1997 +| keep book_no, author, year; +ignoreOrder:true + +book_no:keyword | author:text | year:integer +6970 | Edith Vonnegut | 1998 +4382 | Carole Guinane | 2001 +; + +matchWithDisjunctionAndConjunction +required_capability: match_function +required_capability: full_text_functions_disjunctions + +from books +| where (match(author, "Vonnegut") or match(author, "Marquez")) and match(description, "realism") +| keep book_no; + +book_no:keyword +4814 +; + +matchWithMoreComplexDisjunctionAndConjunction +required_capability: match_function +required_capability: full_text_functions_disjunctions + +from books +| where (match(author, "Vonnegut") and match(description, "charming")) or (match(author, "Marquez") and match(description, "realism")) +| keep book_no; +ignoreOrder:true + +book_no:keyword +6970 +4814 +; + +matchWithDisjunctionIncludingConjunction +required_capability: match_function +required_capability: full_text_functions_disjunctions + +from books +| where match(author, "Vonnegut") or (match(author, "Marquez") and match(description, "realism")) +| keep book_no; +ignoreOrder:true + +book_no:keyword +2464 +6970 +4814 +8956 +3950 +; + matchWithFunctionPushedToLucene required_capability: match_function @@ -523,3 +597,74 @@ from employees,employees_incompatible emp_no_bool:boolean ; + +testMatchWithSemanticText +required_capability: match_function +required_capability: semantic_text_type + +from semantic_text +| where match(semantic_text_field, "something") +| keep semantic_text_field +| sort semantic_text_field asc +; + +semantic_text_field:semantic_text +all we have to decide is what to do with the time that is given to us +be excellent to each other +live long and prosper +; + +testMatchWithSemanticTextAndKeyword +required_capability: match_function +required_capability: semantic_text_type + +from semantic_text +| where match(semantic_text_field, "something") AND match(host, "host1") +| keep semantic_text_field, host +; + +semantic_text_field:semantic_text | host:keyword +live long and prosper | host1 +; + +testMatchWithSemanticTextMultiValueField +required_capability: match_function +required_capability: semantic_text_type + +from semantic_text metadata _id +| where match(st_multi_value, "something") AND match(host, "host1") +| keep _id, st_multi_value +; + +_id: keyword | st_multi_value:semantic_text +1 | ["Hello there!", "This is a random value", "for testing purposes"] +; + +testMatchWithSemanticTextWithEvalsAndOtherFunctionsAndStats +required_capability: match_function +required_capability: semantic_text_type + +from semantic_text +| where qstr("description:some*") +| eval size = mv_count(st_multi_value) +| where match(semantic_text_field, "something") AND size > 1 AND match(host, "host1") +| STATS result = count(*) +; + +result:long +1 +; + +testMatchWithSemanticTextAndKql +required_capability: match_function +required_capability: semantic_text_type +required_capability: 
kql_function + +from semantic_text +| where kql("host:host1") AND match(semantic_text_field, "something") +| KEEP host, semantic_text_field +; + +host:keyword | semantic_text_field:semantic_text +"host1" | live long and prosper +; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/match-operator.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/match-operator.csv-spec index 56f7f5ccd8823..721443a70fe20 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/match-operator.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/match-operator.csv-spec @@ -102,6 +102,81 @@ book_no:keyword | title:text 7140 |The Lord of the Rings Poster Collection: Six Paintings by Alan Lee (No. 1) ; + +matchWithDisjunction +required_capability: match_operator_colon +required_capability: full_text_functions_disjunctions + +from books +| where author : "Vonnegut" or author : "Guinane" +| keep book_no, author; +ignoreOrder:true + +book_no:keyword | author:text +2464 | Kurt Vonnegut +6970 | Edith Vonnegut +8956 | Kurt Vonnegut +3950 | Kurt Vonnegut +4382 | Carole Guinane +; + +matchWithDisjunctionAndFiltersConjunction +required_capability: match_operator_colon +required_capability: full_text_functions_disjunctions + +from books +| where (author : "Vonnegut" or author : "Guinane") and year > 1997 +| keep book_no, author, year; +ignoreOrder:true + +book_no:keyword | author:text | year:integer +6970 | Edith Vonnegut | 1998 +4382 | Carole Guinane | 2001 +; + +matchWithDisjunctionAndConjunction +required_capability: match_operator_colon +required_capability: full_text_functions_disjunctions + +from books +| where (author : "Vonnegut" or author : "Marquez") and description : "realism" +| keep book_no; + +book_no:keyword +4814 +; + +matchWithMoreComplexDisjunctionAndConjunction +required_capability: match_function +required_capability: full_text_functions_disjunctions + +from books +| where (author : "Vonnegut" and description : "charming") or (author : "Marquez" and description : "realism") +| keep book_no; +ignoreOrder:true + +book_no:keyword +6970 +4814 +; + +matchWithDisjunctionIncludingConjunction +required_capability: match_operator_colon +required_capability: full_text_functions_disjunctions + +from books +| where author : "Vonnegut" or (author : "Marquez" and description : "realism") +| keep book_no; +ignoreOrder:true + +book_no:keyword +2464 +6970 +4814 +8956 +3950 +; + matchWithFunctionPushedToLucene required_capability: match_operator_colon @@ -219,7 +294,7 @@ count(*): long | author.keyword:keyword ; testMatchBooleanField -required_capability: match_function +required_capability: match_operator_colon required_capability: match_additional_types from employees @@ -235,7 +310,7 @@ Amabile | true | 2.09 ; testMatchIntegerField -required_capability: match_function +required_capability: match_operator_colon required_capability: match_additional_types from employees @@ -247,7 +322,7 @@ emp_no:integer | first_name:keyword ; testMatchDoubleField -required_capability: match_function +required_capability: match_operator_colon required_capability: match_additional_types from employees @@ -259,7 +334,7 @@ emp_no:integer | salary_change:double ; testMatchLongField -required_capability: match_function +required_capability: match_operator_colon required_capability: match_additional_types from date_nanos @@ -271,7 +346,7 @@ num:long ; testMatchUnsignedLongField -required_capability: match_function +required_capability: match_operator_colon required_capability: 
match_additional_types from ul_logs @@ -283,7 +358,7 @@ bytes_out:unsigned_long ; testMatchIpFieldAsString -required_capability: match_function +required_capability: match_operator_colon required_capability: match_additional_types from sample_data @@ -295,7 +370,7 @@ client_ip:ip | message:keyword ; testMatchDateFieldAsString -required_capability: match_function +required_capability: match_operator_colon required_capability: match_additional_types from date_nanos @@ -307,7 +382,7 @@ millis:date ; testMatchDateNanosFieldAsString -required_capability: match_function +required_capability: match_operator_colon required_capability: match_additional_types from date_nanos @@ -319,7 +394,7 @@ nanos:date_nanos ; testMatchBooleanFieldAsString -required_capability: match_function +required_capability: match_operator_colon required_capability: match_additional_types from employees @@ -335,7 +410,7 @@ Amabile | true | 2.09 ; testMatchIntegerFieldAsString -required_capability: match_function +required_capability: match_operator_colon required_capability: match_additional_types from employees @@ -347,7 +422,7 @@ emp_no:integer | first_name:keyword ; testMatchDoubleFieldAsString -required_capability: match_function +required_capability: match_operator_colon required_capability: match_additional_types from employees @@ -359,7 +434,7 @@ emp_no:integer | salary_change:double ; testMatchLongFieldAsString -required_capability: match_function +required_capability: match_operator_colon required_capability: match_additional_types from date_nanos @@ -371,7 +446,7 @@ num:long ; testMatchUnsignedLongFieldAsString -required_capability: match_function +required_capability: match_operator_colon required_capability: match_additional_types from ul_logs @@ -383,7 +458,7 @@ bytes_out:unsigned_long ; testMatchVersionFieldAsString -required_capability: match_function +required_capability: match_operator_colon required_capability: match_additional_types from apps @@ -395,7 +470,7 @@ bbbbb | 2.1 ; testMatchIntegerAsDouble -required_capability: match_function +required_capability: match_operator_colon required_capability: match_additional_types from employees @@ -408,7 +483,7 @@ emp_no:integer | first_name:keyword ; testMatchDoubleAsIntegerField -required_capability: match_function +required_capability: match_operator_colon required_capability: match_additional_types from employees @@ -423,7 +498,7 @@ emp_no:integer | height:double ; testMatchMultipleFieldTypes -required_capability: match_function +required_capability: match_operator_colon required_capability: match_additional_types from employees,employees_incompatible @@ -440,7 +515,7 @@ emp_as_int:integer | name_as_kw:keyword testMatchMultipleFieldTypesKeywordText -required_capability: match_function +required_capability: match_operator_colon required_capability: match_additional_types from employees,employees_incompatible @@ -455,7 +530,7 @@ Kazuhito ; testMatchMultipleFieldTypesDoubleFloat -required_capability: match_function +required_capability: match_operator_colon required_capability: match_additional_types from employees,employees_incompatible @@ -474,7 +549,7 @@ emp_no:integer | height_dbl:double ; testMatchMultipleFieldTypesBooleanKeyword -required_capability: match_function +required_capability: match_operator_colon required_capability: match_additional_types from employees,employees_incompatible @@ -491,7 +566,7 @@ true ; testMatchMultipleFieldTypesLongUnsignedLong -required_capability: match_function +required_capability: match_operator_colon required_capability: 
match_additional_types from employees,employees_incompatible @@ -506,7 +581,7 @@ avg_worked_seconds_ul:unsigned_long ; testMatchMultipleFieldTypesDateNanosDate -required_capability: match_function +required_capability: match_operator_colon required_capability: match_additional_types from employees,employees_incompatible @@ -521,7 +596,7 @@ hire_date_nanos:date_nanos ; testMatchWithWrongFieldValue -required_capability: match_function +required_capability: match_operator_colon required_capability: match_additional_types from employees,employees_incompatible @@ -533,3 +608,73 @@ from employees,employees_incompatible emp_no_bool:boolean ; +testMatchWithSemanticText +required_capability: match_operator_colon +required_capability: semantic_text_type + +from semantic_text +| where semantic_text_field:"something" +| keep semantic_text_field +| sort semantic_text_field asc +; + +semantic_text_field:semantic_text +all we have to decide is what to do with the time that is given to us +be excellent to each other +live long and prosper +; + +testMatchWithSemanticTextAndKeyword +required_capability: match_operator_colon +required_capability: semantic_text_type + +from semantic_text +| where semantic_text_field:"something" AND host:"host1" +| keep semantic_text_field, host +; + +semantic_text_field:semantic_text | host:keyword +live long and prosper | host1 +; + +testMatchWithSemanticTextMultiValueField +required_capability: match_operator_colon +required_capability: semantic_text_type + +from semantic_text metadata _id +| where st_multi_value:"something" AND match(host, "host1") +| keep _id, st_multi_value +; + +_id: keyword | st_multi_value:semantic_text +1 | ["Hello there!", "This is a random value", "for testing purposes"] +; + +testMatchWithSemanticTextWithEvalsAndOtherFunctionsAndStats +required_capability: match_operator_colon +required_capability: semantic_text_type + +from semantic_text +| where qstr("description:some*") +| eval size = mv_count(st_multi_value) +| where semantic_text_field:"something" AND size > 1 AND match(host, "host1") +| STATS result = count(*) +; + +result:long +1 +; + +testMatchWithSemanticTextAndKql +required_capability: match_operator_colon +required_capability: semantic_text_type +required_capability: kql_function + +from semantic_text +| where kql("host:host1") AND semantic_text_field:"something" +| KEEP host, semantic_text_field +; + +host:keyword | semantic_text_field:semantic_text +"host1" | live long and prosper +; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/scoring.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/scoring.csv-spec index cb38204a71ab0..9d3526982f9ef 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/scoring.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/scoring.csv-spec @@ -13,9 +13,9 @@ from books metadata _score | sort c_score desc, book_no asc | LIMIT 2; -book_no:keyword | title:text | c_score:double -2675 | The Lord of the Rings - Boxed Set | 6.0 -4023 | A Tolkien Compass: Including J. R. R. 
Tolkien's Guide to the Names in The Lord of the Rings | 6.0 +book_no:keyword | title:text | c_score:double +1463 | Realms of Tolkien: Images of Middle-earth | 6.0 +2675 | The Lord of the Rings - Boxed Set | 6.0 ; singleMatchWithKeywordFieldScoring @@ -28,15 +28,15 @@ from books metadata _score | sort book_no; book_no:keyword | author:text | _score:double -2713 | William Faulkner | 2.3142893314361572 -2883 | William Faulkner | 2.3142893314361572 -4724 | William Faulkner | 2.3142893314361572 -4977 | William Faulkner | 2.3142893314361572 -5119 | William Faulkner | 2.3142893314361572 -5404 | William Faulkner | 2.3142893314361572 -5578 | William Faulkner | 2.3142893314361572 -8077 | William Faulkner | 2.3142893314361572 -9896 | William Faulkner | 2.3142893314361572 +2713 | William Faulkner | 1.7589385509490967 +2883 | William Faulkner | 1.7589385509490967 +4724 | William Faulkner | 1.7589385509490967 +4977 | William Faulkner | 2.6145541667938232 +5119 | William Faulkner | 2.513157367706299 +5404 | William Faulkner | 1.7589385509490967 +5578 | William Faulkner | 2.513157367706299 +8077 | William Faulkner | 1.7589385509490967 +9896 | William Faulkner | 2.6145541667938232 ; qstrWithFieldAndScoringSortedEval @@ -51,9 +51,9 @@ from books metadata _score | limit 3; book_no:keyword | title:text | _score:double -2675 | The Lord of the Rings - Boxed Set | 2.7583377361297607 -7140 | The Lord of the Rings Poster Collection: Six Paintings by Alan Lee (No. 1) | 1.9239964485168457 -2714 | Return of the King Being the Third Part of The Lord of the Rings | 1.9239964485168457 +2675 | The Lord of the Rings - Boxed Set | 2.5619282722473145 +2714 | Return of the King Being the Third Part of The Lord of the Rings | 1.9245924949645996 +7140 | The Lord of the Rings Poster Collection: Six Paintings by Alan Lee (No. 1) | 1.746896743774414 ; qstrWithFieldAndScoringSorted @@ -67,9 +67,9 @@ from books metadata _score | limit 3; book_no:keyword | title:text | _score:double -2675 | The Lord of the Rings - Boxed Set | 2.7583377361297607 -7140 | The Lord of the Rings Poster Collection: Six Paintings by Alan Lee (No. 1) | 1.9239964485168457 -2714 | Return of the King Being the Third Part of The Lord of the Rings | 1.9239964485168457 +2675 | The Lord of the Rings - Boxed Set | 2.5619282722473145 +2714 | Return of the King Being the Third Part of The Lord of the Rings | 1.9245924949645996 +7140 | The Lord of the Rings Poster Collection: Six Paintings by Alan Lee (No. 1) | 1.746896743774414 ; singleQstrScoringManipulated @@ -84,8 +84,8 @@ from books metadata _score | LIMIT 2; book_no:keyword | author:text | add_score:double -2378 | [Carol Faulkner, Holly Byers Ochoa, Lucretia Mott] | 2.0 -2713 | William Faulkner | 7.0 +2378 | [Carol Faulkner, Holly Byers Ochoa, Lucretia Mott] | 3.0 +2713 | William Faulkner | 6.0 ; testMultiValuedFieldWithConjunctionWithScore @@ -125,7 +125,7 @@ from books metadata _score ignoreOrder:true book_no:keyword | title:text | author:text | _score:double -8480 | The wind's twelve quarters: Short stories | Ursula K. Le Guin | 14.489097595214844 +8480 | The wind's twelve quarters: Short stories | Ursula K. Le Guin | 11.193471908569336 ; multipleWhereWithMatchScoring @@ -139,7 +139,7 @@ from books metadata _score | sort book_no; book_no:keyword | title:text | author:text | _score:double -8480 | The wind's twelve quarters: Short stories | Ursula K. Le Guin | 14.489097595214844 +8480 | The wind's twelve quarters: Short stories | Ursula K. 
Le Guin | 11.193471908569336 ; combinedMatchWithFunctionsScoring @@ -153,7 +153,7 @@ from books metadata _score | sort book_no; book_no:keyword | title:text | author:text | year:integer | _score:double -5335 | Letters of J R R Tolkien | J.R.R. Tolkien | 2014 | 5.448054313659668 +5335 | Letters of J R R Tolkien | J.R.R. Tolkien | 2014 | 4.733664035797119 ; singleQstrScoring @@ -167,8 +167,8 @@ from books metadata _score | LIMIT 2; book_no:keyword | author:text | _score:double -2378 | [Carol Faulkner, Holly Byers Ochoa, Lucretia Mott] | 0.9976131916046143 -2713 | William Faulkner | 5.9556169509887695 +2378 | [Carol Faulkner, Holly Byers Ochoa, Lucretia Mott] | 1.3697924613952637 +2713 | William Faulkner | 4.631696701049805 ; singleQstrScoringGrok @@ -183,9 +183,9 @@ from books metadata _score | LIMIT 3; book_no:keyword | title:keyword | _score:double -8875 | The | 2.9505908489227295 -4023 | A | 2.8327860832214355 -2675 | The | 2.7583377361297607 +8875 | The | 2.769660472869873 +1463 | Realms | 2.6714818477630615 +2675 | The | 2.5619282722473145 ; combinedMatchWithScoringEvalNoSort @@ -200,7 +200,7 @@ from books metadata _score ignoreOrder:true book_no:keyword | title:text | author:text | year:integer | c_score:double -5335 | Letters of J R R Tolkien | J.R.R. Tolkien | 2014 | 6 +5335 | Letters of J R R Tolkien | J.R.R. Tolkien | 2014 | 5.0 ; singleQstrScoringRename @@ -215,9 +215,9 @@ from books metadata _score | LIMIT 3; book_no:keyword | rank:double -8875 | 2.9505908489227295 -4023 | 2.8327860832214355 -2675 | 2.7583377361297607 +8875 | 2.769660472869873 +1463 | 2.6714818477630615 +2675 | 2.5619282722473145 ; singleMatchWithTextFieldScoring @@ -231,11 +231,11 @@ from books metadata _score | limit 5; book_no:keyword | author:text | _score:double -2378 | [Carol Faulkner, Holly Byers Ochoa, Lucretia Mott] | 0.9976131916046143 -2713 | William Faulkner | 4.272439002990723 -2847 | Colleen Faulkner | 1.7401835918426514 -2883 | William Faulkner | 4.272439002990723 -3293 | Danny Faulkner | 1.7401835918426514 +2378 | [Carol Faulkner, Holly Byers Ochoa, Lucretia Mott] | 1.3697924613952637 +2713 | William Faulkner | 3.2750158309936523 +2847 | Colleen Faulkner | 1.593343734741211 +2883 | William Faulkner | 3.2750158309936523 +3293 | Danny Faulkner | 1.593343734741211 ; combinedMatchWithFunctionsScoringNoSort @@ -249,7 +249,7 @@ from books metadata _score ignoreOrder:true book_no:keyword | title:text | author:text | year:integer | _score:double -5335 | Letters of J R R Tolkien | J.R.R. Tolkien | 2014 | 5.448054313659668 +5335 | Letters of J R R Tolkien | J.R.R. Tolkien | 2014 | 4.733664035797119 ; combinedMatchWithScoringEval @@ -264,7 +264,7 @@ from books metadata _score | sort book_no; book_no:keyword | title:text | author:text | year:integer | c_score:double -5335 | Letters of J R R Tolkien | J.R.R. Tolkien | 2014 | 6 +5335 | Letters of J R R Tolkien | J.R.R. Tolkien | 2014 | 5.0 ; singleQstrScoringEval @@ -280,7 +280,7 @@ from books metadata _score book_no:keyword | c_score:double 8875 | 3.0 -7350 | 2.0 +7350 | 1.0 7140 | 3.0 ; @@ -289,14 +289,16 @@ required_capability: metadata_score required_capability: qstr_function from books metadata _score -| where qstr("title:rings") +| where qstr("title:gentle") | eval _score = _score + 1 | keep book_no, title, _score -| limit 2; +| limit 10 +; +ignoreOrder:true -book_no:keyword | title:text | _score:double -4023 | A Tolkien Compass: Including J. R. R. 
Tolkien's Guide to the Names in The Lord of the Rings | 2.6404519081115723 -2714 | Return of the King Being the Third Part of The Lord of the Rings | 2.9239964485168457 +book_no:keyword | title:text | _score:double +2924 | A Gentle Creature and Other Stories: White Nights, A Gentle Creature, and The Dream of a Ridiculous Man (The World's Classics) | 3.158426523208618 +5948 | That We Are Gentle Creatures | 3.727346897125244 ; QstrScoreOverride @@ -304,12 +306,54 @@ required_capability: metadata_score required_capability: qstr_function from books metadata _score -| where qstr("title:rings") +| where qstr("title:gentle") | eval _score = "foobar" | keep book_no, title, _score -| limit 2; +| limit 10 +; +ignoreOrder:true + +book_no:keyword | title:text | _score:keyword +2924 | A Gentle Creature and Other Stories: White Nights, A Gentle Creature, and The Dream of a Ridiculous Man (The World's Classics) | foobar +5948 | That We Are Gentle Creatures | foobar +; + + +semanticTextMatch +required_capability: metadata_score +required_capability: semantic_text_type +required_capability: match_function + +from semantic_text metadata _id, _score +| where match(semantic_text_field, "something") +| sort _score desc +| keep _id +; + +_id:keyword +2 +3 +1 +; + +semanticTextMatchWithAllTheTextFunctions + +required_capability: metadata_score +required_capability: semantic_text_type +required_capability: match_function +required_capability: kql_function +required_capability: qstr_function + +from semantic_text metadata _id, _score +| where match(semantic_text_field, "something") + AND match(description, "some") + AND kql("description:some*") + AND NOT qstr("host:host1") +| sort _score desc +| keep _id +; -book_no:keyword | title:text | _score:keyword -4023 | A Tolkien Compass: Including J. R. R. 
Tolkien's Guide to the Names in The Lord of the Rings | foobar -2714 | Return of the King Being the Third Part of The Lord of the Rings | foobar +_id:keyword +2 +3 ; diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/plugin/KqlFunctionIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/plugin/KqlFunctionIT.java index d58637ab52c86..0e84ac7588ad6 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/plugin/KqlFunctionIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/plugin/KqlFunctionIT.java @@ -10,13 +10,17 @@ import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.index.query.QueryShardException; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.xpack.esql.VerificationException; import org.elasticsearch.xpack.esql.action.AbstractEsqlIntegTestCase; import org.elasticsearch.xpack.esql.action.EsqlCapabilities; +import org.elasticsearch.xpack.kql.KqlPlugin; import org.junit.Before; import org.junit.BeforeClass; +import java.util.Collection; import java.util.List; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; @@ -141,4 +145,9 @@ private void createAndPopulateIndex() { .get(); ensureYellow(indexName); } + + @Override + protected Collection> nodePlugins() { + return CollectionUtils.appendToCopy(super.nodePlugins(), KqlPlugin.class); + } } diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/plugin/MatchFunctionIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/plugin/MatchFunctionIT.java index 58b1652653ca3..ad90bbf6ae9db 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/plugin/MatchFunctionIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/plugin/MatchFunctionIT.java @@ -168,7 +168,7 @@ public void testWhereMatchWithScoringNoSort() { var query = """ FROM test METADATA _score - | WHERE content:"fox" + | WHERE match(content, "fox") | KEEP id, _score """; @@ -182,7 +182,7 @@ public void testWhereMatchWithScoringNoSort() { public void testNonExistingColumn() { var query = """ FROM test - | WHERE something:"fox" + | WHERE match(something, "fox") """; var error = expectThrows(VerificationException.class, () -> run(query)); @@ -193,14 +193,14 @@ public void testWhereMatchEvalColumn() { var query = """ FROM test | EVAL upper_content = to_upper(content) - | WHERE upper_content:"FOX" + | WHERE match(upper_content, "FOX") | KEEP id """; var error = expectThrows(VerificationException.class, () -> run(query)); assertThat( error.getMessage(), - containsString("[:] operator cannot operate on [upper_content], which is not a field from an index mapping") + containsString("[MATCH] function cannot operate on [upper_content], which is not a field from an index mapping") ); } @@ -209,13 +209,13 @@ public void testWhereMatchOverWrittenColumn() { FROM test | DROP content | EVAL content = CONCAT("document with ID ", to_str(id)) - | WHERE content:"document" + | WHERE match(content, "document") """; var error = expectThrows(VerificationException.class, () -> run(query)); assertThat( error.getMessage(), - containsString("[:] operator cannot operate on [content], which is not a field from an 
index mapping") + containsString("[MATCH] function cannot operate on [content], which is not a field from an index mapping") ); } @@ -223,7 +223,7 @@ public void testWhereMatchAfterStats() { var query = """ FROM test | STATS count(*) - | WHERE content:"fox" + | WHERE match(content, "fox") """; var error = expectThrows(VerificationException.class, () -> run(query)); @@ -233,14 +233,15 @@ public void testWhereMatchAfterStats() { public void testWhereMatchWithFunctions() { var query = """ FROM test - | WHERE content:"fox" OR to_upper(content) == "FOX" + | WHERE match(content, "fox") OR to_upper(content) == "FOX" """; var error = expectThrows(ElasticsearchException.class, () -> run(query)); assertThat( error.getMessage(), containsString( - "Invalid condition [content:\"fox\" OR to_upper(content) == \"FOX\"]. " - + "[:] operator can't be used as part of an or condition" + "Invalid condition [match(content, \"fox\") OR to_upper(content) == \"FOX\"]. " + + "Full text functions can be used in an OR condition," + + " but only if just full text functions are used in the OR condition" ) ); } @@ -248,24 +249,24 @@ public void testWhereMatchWithFunctions() { public void testWhereMatchWithRow() { var query = """ ROW content = "a brown fox" - | WHERE content:"fox" + | WHERE match(content, "fox") """; var error = expectThrows(ElasticsearchException.class, () -> run(query)); assertThat( error.getMessage(), - containsString("[:] operator cannot operate on [\"a brown fox\"], which is not a field from an index mapping") + containsString("[MATCH] function cannot operate on [\"a brown fox\"], which is not a field from an index mapping") ); } public void testMatchWithinEval() { var query = """ FROM test - | EVAL matches_query = content:"fox" + | EVAL matches_query = match(content, "fox") """; var error = expectThrows(VerificationException.class, () -> run(query)); - assertThat(error.getMessage(), containsString("[:] operator is only supported in WHERE commands")); + assertThat(error.getMessage(), containsString("[MATCH] function is only supported in WHERE commands")); } private void createAndPopulateIndex() { diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/plugin/MatchOperatorIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/plugin/MatchOperatorIT.java index d0a641f086fe4..758878b46d51f 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/plugin/MatchOperatorIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/plugin/MatchOperatorIT.java @@ -216,7 +216,8 @@ public void testWhereMatchWithFunctions() { error.getMessage(), containsString( "Invalid condition [content:\"fox\" OR to_upper(content) == \"FOX\"]. " - + "[:] operator can't be used as part of an or condition" + + "Full text functions can be used in an OR condition, " + + "but only if just full text functions are used in the OR condition" ) ); } diff --git a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/HashConstantEvaluator.java b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/HashConstantEvaluator.java new file mode 100644 index 0000000000000..34cff73018634 --- /dev/null +++ b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/HashConstantEvaluator.java @@ -0,0 +1,142 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.xpack.esql.expression.function.scalar.string; + +import java.lang.IllegalArgumentException; +import java.lang.Override; +import java.lang.String; +import java.util.function.Function; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.BytesRefVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.BreakingBytesRefBuilder; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.compute.operator.Warnings; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.xpack.esql.core.tree.Source; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link Hash}. + * This class is generated. Do not edit it. + */ +public final class HashConstantEvaluator implements EvalOperator.ExpressionEvaluator { + private final Source source; + + private final BreakingBytesRefBuilder scratch; + + private final Hash.HashFunction algorithm; + + private final EvalOperator.ExpressionEvaluator input; + + private final DriverContext driverContext; + + private Warnings warnings; + + public HashConstantEvaluator(Source source, BreakingBytesRefBuilder scratch, + Hash.HashFunction algorithm, EvalOperator.ExpressionEvaluator input, + DriverContext driverContext) { + this.source = source; + this.scratch = scratch; + this.algorithm = algorithm; + this.input = input; + this.driverContext = driverContext; + } + + @Override + public Block eval(Page page) { + try (BytesRefBlock inputBlock = (BytesRefBlock) input.eval(page)) { + BytesRefVector inputVector = inputBlock.asVector(); + if (inputVector == null) { + return eval(page.getPositionCount(), inputBlock); + } + return eval(page.getPositionCount(), inputVector).asBlock(); + } + } + + public BytesRefBlock eval(int positionCount, BytesRefBlock inputBlock) { + try(BytesRefBlock.Builder result = driverContext.blockFactory().newBytesRefBlockBuilder(positionCount)) { + BytesRef inputScratch = new BytesRef(); + position: for (int p = 0; p < positionCount; p++) { + if (inputBlock.isNull(p)) { + result.appendNull(); + continue position; + } + if (inputBlock.getValueCount(p) != 1) { + if (inputBlock.getValueCount(p) > 1) { + warnings().registerException(new IllegalArgumentException("single-value function encountered multi-value")); + } + result.appendNull(); + continue position; + } + result.appendBytesRef(Hash.processConstant(this.scratch, this.algorithm, inputBlock.getBytesRef(inputBlock.getFirstValueIndex(p), inputScratch))); + } + return result.build(); + } + } + + public BytesRefVector eval(int positionCount, BytesRefVector inputVector) { + try(BytesRefVector.Builder result = driverContext.blockFactory().newBytesRefVectorBuilder(positionCount)) { + BytesRef inputScratch = new BytesRef(); + position: for (int p = 0; p < positionCount; p++) { + result.appendBytesRef(Hash.processConstant(this.scratch, this.algorithm, inputVector.getBytesRef(p, inputScratch))); + } + return result.build(); + } + } + + @Override + public String toString() { + return "HashConstantEvaluator[" + "algorithm=" + algorithm + ", input=" + input + "]"; + } + + @Override + public void close() { + 
Releasables.closeExpectNoException(scratch, input); + } + + private Warnings warnings() { + if (warnings == null) { + this.warnings = Warnings.createWarnings( + driverContext.warningsMode(), + source.source().getLineNumber(), + source.source().getColumnNumber(), + source.text() + ); + } + return warnings; + } + + static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final Source source; + + private final Function scratch; + + private final Function algorithm; + + private final EvalOperator.ExpressionEvaluator.Factory input; + + public Factory(Source source, Function scratch, + Function algorithm, + EvalOperator.ExpressionEvaluator.Factory input) { + this.source = source; + this.scratch = scratch; + this.algorithm = algorithm; + this.input = input; + } + + @Override + public HashConstantEvaluator get(DriverContext context) { + return new HashConstantEvaluator(source, scratch.apply(context), algorithm.apply(context), input.get(context), context); + } + + @Override + public String toString() { + return "HashConstantEvaluator[" + "algorithm=" + algorithm + ", input=" + input + "]"; + } + } +} diff --git a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/HashEvaluator.java b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/HashEvaluator.java new file mode 100644 index 0000000000000..8b01cc0330142 --- /dev/null +++ b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/HashEvaluator.java @@ -0,0 +1,174 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.xpack.esql.expression.function.scalar.string; + +import java.lang.IllegalArgumentException; +import java.lang.Override; +import java.lang.String; +import java.security.NoSuchAlgorithmException; +import java.util.function.Function; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.BytesRefVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.BreakingBytesRefBuilder; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.compute.operator.Warnings; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.xpack.esql.core.tree.Source; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link Hash}. + * This class is generated. Do not edit it. 
+ */ +public final class HashEvaluator implements EvalOperator.ExpressionEvaluator { + private final Source source; + + private final BreakingBytesRefBuilder scratch; + + private final EvalOperator.ExpressionEvaluator algorithm; + + private final EvalOperator.ExpressionEvaluator input; + + private final DriverContext driverContext; + + private Warnings warnings; + + public HashEvaluator(Source source, BreakingBytesRefBuilder scratch, + EvalOperator.ExpressionEvaluator algorithm, EvalOperator.ExpressionEvaluator input, + DriverContext driverContext) { + this.source = source; + this.scratch = scratch; + this.algorithm = algorithm; + this.input = input; + this.driverContext = driverContext; + } + + @Override + public Block eval(Page page) { + try (BytesRefBlock algorithmBlock = (BytesRefBlock) algorithm.eval(page)) { + try (BytesRefBlock inputBlock = (BytesRefBlock) input.eval(page)) { + BytesRefVector algorithmVector = algorithmBlock.asVector(); + if (algorithmVector == null) { + return eval(page.getPositionCount(), algorithmBlock, inputBlock); + } + BytesRefVector inputVector = inputBlock.asVector(); + if (inputVector == null) { + return eval(page.getPositionCount(), algorithmBlock, inputBlock); + } + return eval(page.getPositionCount(), algorithmVector, inputVector); + } + } + } + + public BytesRefBlock eval(int positionCount, BytesRefBlock algorithmBlock, + BytesRefBlock inputBlock) { + try(BytesRefBlock.Builder result = driverContext.blockFactory().newBytesRefBlockBuilder(positionCount)) { + BytesRef algorithmScratch = new BytesRef(); + BytesRef inputScratch = new BytesRef(); + position: for (int p = 0; p < positionCount; p++) { + if (algorithmBlock.isNull(p)) { + result.appendNull(); + continue position; + } + if (algorithmBlock.getValueCount(p) != 1) { + if (algorithmBlock.getValueCount(p) > 1) { + warnings().registerException(new IllegalArgumentException("single-value function encountered multi-value")); + } + result.appendNull(); + continue position; + } + if (inputBlock.isNull(p)) { + result.appendNull(); + continue position; + } + if (inputBlock.getValueCount(p) != 1) { + if (inputBlock.getValueCount(p) > 1) { + warnings().registerException(new IllegalArgumentException("single-value function encountered multi-value")); + } + result.appendNull(); + continue position; + } + try { + result.appendBytesRef(Hash.process(this.scratch, algorithmBlock.getBytesRef(algorithmBlock.getFirstValueIndex(p), algorithmScratch), inputBlock.getBytesRef(inputBlock.getFirstValueIndex(p), inputScratch))); + } catch (NoSuchAlgorithmException e) { + warnings().registerException(e); + result.appendNull(); + } + } + return result.build(); + } + } + + public BytesRefBlock eval(int positionCount, BytesRefVector algorithmVector, + BytesRefVector inputVector) { + try(BytesRefBlock.Builder result = driverContext.blockFactory().newBytesRefBlockBuilder(positionCount)) { + BytesRef algorithmScratch = new BytesRef(); + BytesRef inputScratch = new BytesRef(); + position: for (int p = 0; p < positionCount; p++) { + try { + result.appendBytesRef(Hash.process(this.scratch, algorithmVector.getBytesRef(p, algorithmScratch), inputVector.getBytesRef(p, inputScratch))); + } catch (NoSuchAlgorithmException e) { + warnings().registerException(e); + result.appendNull(); + } + } + return result.build(); + } + } + + @Override + public String toString() { + return "HashEvaluator[" + "algorithm=" + algorithm + ", input=" + input + "]"; + } + + @Override + public void close() { + Releasables.closeExpectNoException(scratch, algorithm, 
input); + } + + private Warnings warnings() { + if (warnings == null) { + this.warnings = Warnings.createWarnings( + driverContext.warningsMode(), + source.source().getLineNumber(), + source.source().getColumnNumber(), + source.text() + ); + } + return warnings; + } + + static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final Source source; + + private final Function scratch; + + private final EvalOperator.ExpressionEvaluator.Factory algorithm; + + private final EvalOperator.ExpressionEvaluator.Factory input; + + public Factory(Source source, Function scratch, + EvalOperator.ExpressionEvaluator.Factory algorithm, + EvalOperator.ExpressionEvaluator.Factory input) { + this.source = source; + this.scratch = scratch; + this.algorithm = algorithm; + this.input = input; + } + + @Override + public HashEvaluator get(DriverContext context) { + return new HashEvaluator(source, scratch.apply(context), algorithm.get(context), input.get(context), context); + } + + @Override + public String toString() { + return "HashEvaluator[" + "algorithm=" + algorithm + ", input=" + input + "]"; + } + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java index 4fcabb02b2d4f..a6e0f1d89c364 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java @@ -352,7 +352,10 @@ public enum Cap { * Support for mixed comparisons between nanosecond and millisecond dates */ DATE_NANOS_COMPARE_TO_MILLIS(), - + /** + * Support implicit casting of strings to date nanos + */ + DATE_NANOS_IMPLICIT_CASTING(), /** * Support Least and Greatest functions on Date Nanos type */ @@ -446,6 +449,11 @@ public enum Cap { */ KQL_FUNCTION(Build.current().isSnapshot()), + /** + * Hash function + */ + HASH_FUNCTION, + /** * Don't optimize CASE IS NOT NULL function by not requiring the fields to be not null as well. 
* https://github.com/elastic/elasticsearch/issues/112704 @@ -547,7 +555,12 @@ public enum Cap { /** * LOOKUP JOIN */ - JOIN_LOOKUP_V7(Build.current().isSnapshot()), + JOIN_LOOKUP_V8(Build.current().isSnapshot()), + + /** + * LOOKUP JOIN with the same index as the FROM + */ + JOIN_LOOKUP_REPEATED_INDEX_FROM(JOIN_LOOKUP_V8.isEnabled()), /** * Fix for https://github.com/elastic/elasticsearch/issues/117054 @@ -582,7 +595,12 @@ public enum Cap { /** * Fix for regex folding with case-insensitive pattern https://github.com/elastic/elasticsearch/issues/118371 */ - FIXED_REGEX_FOLD; + FIXED_REGEX_FOLD, + + /** + * Full text functions can be used in disjunctions + */ + FULL_TEXT_FUNCTIONS_DISJUNCTIONS; private final boolean enabled; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java index d59745f03f608..ecd0821c626bf 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java @@ -118,6 +118,7 @@ import static org.elasticsearch.xpack.core.enrich.EnrichPolicy.GEO_MATCH_TYPE; import static org.elasticsearch.xpack.esql.core.type.DataType.BOOLEAN; import static org.elasticsearch.xpack.esql.core.type.DataType.DATETIME; +import static org.elasticsearch.xpack.esql.core.type.DataType.DATE_NANOS; import static org.elasticsearch.xpack.esql.core.type.DataType.DATE_PERIOD; import static org.elasticsearch.xpack.esql.core.type.DataType.DOUBLE; import static org.elasticsearch.xpack.esql.core.type.DataType.FLOAT; @@ -234,6 +235,37 @@ private LogicalPlan resolveIndex(UnresolvedRelation plan, IndexResolution indexR } EsIndex esIndex = indexResolution.get(); + + if (plan.indexMode().equals(IndexMode.LOOKUP)) { + String indexResolutionMessage = null; + + var indexNameWithModes = esIndex.indexNameWithModes(); + if (indexNameWithModes.size() != 1) { + indexResolutionMessage = "invalid [" + + table + + "] resolution in lookup mode to [" + + indexNameWithModes.size() + + "] indices"; + } else if (indexNameWithModes.values().iterator().next() != IndexMode.LOOKUP) { + indexResolutionMessage = "invalid [" + + table + + "] resolution in lookup mode to an index in [" + + indexNameWithModes.values().iterator().next() + + "] mode"; + } + + if (indexResolutionMessage != null) { + return new UnresolvedRelation( + plan.source(), + plan.table(), + plan.frozen(), + plan.metadataFields(), + plan.indexMode(), + indexResolutionMessage, + plan.commandName() + ); + } + } var attributes = mappingAsAttributes(plan.source(), esIndex.mapping()); attributes.addAll(plan.metadataFields()); return new EsRelation(plan.source(), esIndex, attributes.isEmpty() ? NO_FIELDS : attributes, plan.indexMode()); @@ -1050,21 +1082,23 @@ private BitSet gatherPreAnalysisMetrics(LogicalPlan plan, BitSet b) { /** * Cast string literals in ScalarFunction, EsqlArithmeticOperation, BinaryComparison, In and GroupingFunction to desired data types. * For example, the string literals in the following expressions will be cast implicitly to the field data type on the left hand side. - * date > "2024-08-21" - * date in ("2024-08-21", "2024-08-22", "2024-08-23") - * date = "2024-08-21" + 3 days - * ip == "127.0.0.1" - * version != "1.0" - * bucket(dateField, "1 month") - * date_trunc("1 minute", dateField) - * + *
<ul>
+ * <li>date > "2024-08-21"</li>
+ * <li>date in ("2024-08-21", "2024-08-22", "2024-08-23")</li>
+ * <li>date = "2024-08-21" + 3 days</li>
+ * <li>ip == "127.0.0.1"</li>
+ * <li>version != "1.0"</li>
+ * <li>bucket(dateField, "1 month")</li>
+ * <li>date_trunc("1 minute", dateField)</li>
+ * </ul>
* If the inputs to Coalesce are mixed numeric types, cast the rest of the numeric field or value to the first numeric data type if * applicable. For example, implicit casting converts: - * Coalesce(Long, Int) to Coalesce(Long, Long) - * Coalesce(null, Long, Int) to Coalesce(null, Long, Long) - * Coalesce(Double, Long, Int) to Coalesce(Double, Double, Double) - * Coalesce(null, Double, Long, Int) to Coalesce(null, Double, Double, Double) - *
+ * <ul>
+ * <li>Coalesce(Long, Int) to Coalesce(Long, Long)</li>
+ * <li>Coalesce(null, Long, Int) to Coalesce(null, Long, Long)</li>
+ * <li>Coalesce(Double, Long, Int) to Coalesce(Double, Double, Double)</li>
+ * <li>Coalesce(null, Double, Long, Int) to Coalesce(null, Double, Double, Double)</li>
+ * </ul>
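To make the string-to-date-nanos cast concrete (the DATE_NANOS entry added to supportsStringImplicitCasting just below), a minimal ESQL sketch; it assumes the date_nanos test index and its nanos field used elsewhere in this diff, and the queries are illustrative rather than taken from a test file:

  // Previously the string literal needed an explicit conversion:
  FROM date_nanos | WHERE nanos > TO_DATE_NANOS("2023-10-23T12:27:28.948Z")

  // With DATE_NANOS_IMPLICIT_CASTING the comparison can be written directly,
  // matching the datetime, ip, version and boolean cases listed above:
  FROM date_nanos | WHERE nanos > "2023-10-23T12:27:28.948Z"
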
* Coalesce(Int, Long) will NOT be converted to Coalesce(Long, Long) or Coalesce(Int, Int). */ private static class ImplicitCasting extends ParameterizedRule { @@ -1245,7 +1279,7 @@ private static boolean supportsImplicitTemporalCasting(Expression e, BinaryOpera } private static boolean supportsStringImplicitCasting(DataType type) { - return type == DATETIME || type == IP || type == VERSION || type == BOOLEAN; + return type == DATETIME || type == DATE_NANOS || type == IP || type == VERSION || type == BOOLEAN; } private static UnresolvedAttribute unresolvedAttribute(Expression value, String type, Exception e) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java index f01cc265e330b..93e9d59ed8c6e 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java @@ -766,41 +766,78 @@ private static void checkRemoteEnrich(LogicalPlan plan, Set failures) { } /** - * Checks whether a condition contains a disjunction with the specified typeToken. Adds to failure if it does. + * Checks whether a condition contains a disjunction with a full text search. + * If it does, check that every element of the disjunction is a full text function or a combination (AND, OR, NOT) of them. + * If not, add a failure to the failures collection. * - * @param condition condition to check for disjunctions + * @param condition condition to check for disjunctions of full text searches * @param typeNameProvider provider for the type name to add in the failure message * @param failures failures collection to add to */ - private static void checkNotPresentInDisjunctions( + private static void checkFullTextSearchDisjunctions( Expression condition, java.util.function.Function typeNameProvider, Set failures ) { - condition.forEachUp(Or.class, or -> { - checkNotPresentInDisjunctions(or.left(), or, typeNameProvider, failures); - checkNotPresentInDisjunctions(or.right(), or, typeNameProvider, failures); + int failuresCount = failures.size(); + condition.forEachDown(Or.class, or -> { + if (failures.size() > failuresCount) { + // Exit early if we already have a failure + return; + } + boolean hasFullText = or.anyMatch(FullTextFunction.class::isInstance); + if (hasFullText) { + boolean hasOnlyFullText = onlyFullTextFunctionsInExpression(or); + if (hasOnlyFullText == false) { + failures.add( + fail( + or, + "Invalid condition [{}]. Full text functions can be used in an OR condition, " + + "but only if just full text functions are used in the OR condition", + or.sourceText() + ) + ); + } + } }); } /** - * Checks whether a condition contains a disjunction with the specified typeToken. Adds to failure if it does.
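As a sketch of what the rewritten check accepts and rejects (the queries are illustrative, written against the books test index used elsewhere in this diff, not taken from a test file):

  // Accepted: every branch of the OR is a full text function
  FROM books METADATA _score | WHERE match(title, "rings") OR qstr("author:Tolkien")

  // Rejected: a full text function is mixed with a regular predicate, failing with
  // "Full text functions can be used in an OR condition, but only if just full text
  // functions are used in the OR condition"
  FROM books | WHERE match(title, "rings") OR year > 2000
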
+ * Checks whether an expression contains just full text functions or negations (NOT) and combinations (AND, OR) of full text functions * - * @param parentExpression parent expression to add to the failure message - * @param or disjunction that is being checked - * @param failures failures collection to add to + * @param expression expression to check + * @return true if all children are full text functions or negations of full text functions, false otherwise */ - private static void checkNotPresentInDisjunctions( - Expression parentExpression, - Or or, - java.util.function.Function elementName, - Set failures - ) { - parentExpression.forEachDown(FullTextFunction.class, ftp -> { - failures.add( - fail(or, "Invalid condition [{}]. {} can't be used as part of an or condition", or.sourceText(), elementName.apply(ftp)) - ); - }); + private static boolean onlyFullTextFunctionsInExpression(Expression expression) { + if (expression instanceof FullTextFunction) { + return true; + } else if (expression instanceof Not) { + return onlyFullTextFunctionsInExpression(expression.children().get(0)); + } else if (expression instanceof BinaryLogic binaryLogic) { + return onlyFullTextFunctionsInExpression(binaryLogic.left()) && onlyFullTextFunctionsInExpression(binaryLogic.right()); + } + + return false; + } + + /** + * Checks whether an expression contains a full text function as part of it + * + * @param expression expression to check + * @return true if the expression or any of its children is a full text function, false otherwise + */ + private static boolean anyFullTextFunctionsInExpression(Expression expression) { + if (expression instanceof FullTextFunction) { + return true; + } + + for (Expression child : expression.children()) { + if (anyFullTextFunctionsInExpression(child)) { + return true; + } + } + + return false; } /** @@ -828,7 +865,6 @@ private static void checkJoin(LogicalPlan plan, Set failures) { ); } } - } } @@ -870,7 +906,7 @@ private static void checkFullTextQueryFunctions(LogicalPlan plan, Set f m -> "[" + m.functionName() + "] " + m.functionType(), failures ); - checkNotPresentInDisjunctions(condition, ftf -> "[" + ftf.functionName() + "] " + ftf.functionType(), failures); + checkFullTextSearchDisjunctions(condition, ftf -> "[" + ftf.functionName() + "] " + ftf.functionType(), failures); checkFullTextFunctionsParents(condition, failures); } else { plan.forEachExpression(FullTextFunction.class, ftf -> { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/execution/PlanExecutor.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/execution/PlanExecutor.java index c1269009c6a41..dad63d25046d9 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/execution/PlanExecutor.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/execution/PlanExecutor.java @@ -23,6 +23,7 @@ import org.elasticsearch.xpack.esql.session.Configuration; import org.elasticsearch.xpack.esql.session.EsqlSession; import org.elasticsearch.xpack.esql.session.IndexResolver; +import org.elasticsearch.xpack.esql.session.QueryBuilderResolver; import org.elasticsearch.xpack.esql.session.Result; import org.elasticsearch.xpack.esql.stats.Metrics; import org.elasticsearch.xpack.esql.stats.PlanningMetrics; @@ -59,6 +60,7 @@ public void esql( EsqlExecutionInfo executionInfo, IndicesExpressionGrouper indicesExpressionGrouper, EsqlSession.PlanRunner planRunner, + QueryBuilderResolver queryBuilderResolver, ActionListener listener ) { final PlanningMetrics 
planningMetrics = new PlanningMetrics(); @@ -73,7 +75,8 @@ public void esql( mapper, verifier, planningMetrics, - indicesExpressionGrouper + indicesExpressionGrouper, + queryBuilderResolver ); QueryMetric clientId = QueryMetric.fromString("rest"); metrics.total(clientId); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java index a59ef5bb1575d..908c9c5f197a8 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java @@ -129,6 +129,7 @@ import org.elasticsearch.xpack.esql.expression.function.scalar.string.ByteLength; import org.elasticsearch.xpack.esql.expression.function.scalar.string.Concat; import org.elasticsearch.xpack.esql.expression.function.scalar.string.EndsWith; +import org.elasticsearch.xpack.esql.expression.function.scalar.string.Hash; import org.elasticsearch.xpack.esql.expression.function.scalar.string.LTrim; import org.elasticsearch.xpack.esql.expression.function.scalar.string.Left; import org.elasticsearch.xpack.esql.expression.function.scalar.string.Length; @@ -327,6 +328,7 @@ private static FunctionDefinition[][] functions() { def(ByteLength.class, ByteLength::new, "byte_length"), def(Concat.class, Concat::new, "concat"), def(EndsWith.class, EndsWith::new, "ends_with"), + def(Hash.class, Hash::new, "hash"), def(LTrim.class, LTrim::new, "ltrim"), def(Left.class, Left::new, "left"), def(Length.class, Length::new, "length"), diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/fulltext/Match.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/fulltext/Match.java index 0b2268fe1b022..e695a94198dab 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/fulltext/Match.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/fulltext/Match.java @@ -51,6 +51,7 @@ import static org.elasticsearch.xpack.esql.core.type.DataType.IP; import static org.elasticsearch.xpack.esql.core.type.DataType.KEYWORD; import static org.elasticsearch.xpack.esql.core.type.DataType.LONG; +import static org.elasticsearch.xpack.esql.core.type.DataType.SEMANTIC_TEXT; import static org.elasticsearch.xpack.esql.core.type.DataType.TEXT; import static org.elasticsearch.xpack.esql.core.type.DataType.UNSIGNED_LONG; import static org.elasticsearch.xpack.esql.core.type.DataType.VERSION; @@ -70,6 +71,7 @@ public class Match extends FullTextFunction implements Validatable { public static final Set FIELD_DATA_TYPES = Set.of( KEYWORD, TEXT, + SEMANTIC_TEXT, BOOLEAN, DATETIME, DATE_NANOS, diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/grouping/Bucket.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/grouping/Bucket.java index 347d542f5212d..12932ba8d6e11 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/grouping/Bucket.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/grouping/Bucket.java @@ -163,6 +163,17 @@ another in which the bucket size is provided directly (two parameters). 
grouping part, or that it is invoked with the exact same expression:""", file = "bucket", tag = "reuseGroupingFunctionWithExpression" + ), + @Example( + description = """ + Sometimes you need to change the start value of each bucket by a given duration (similar to date histogram + aggregation's <> parameter). To do so, you will need to + take into account how the language handles expressions within the `STATS` command: if these contain functions or + arithmetic operators, a virtual `EVAL` is inserted before and/or after the `STATS` command. Consequently, a double + compensation is needed to adjust the bucketed date value before the aggregation and then again after. For instance, + inserting a negative offset of `1 hour` to buckets of `1 year` looks like this:""", + file = "bucket", + tag = "bucketWithOffset" ) } ) public Bucket( diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/ScalarFunctionWritables.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/ScalarFunctionWritables.java index 192ca6c43e57d..3cf0eef9074ad 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/ScalarFunctionWritables.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/ScalarFunctionWritables.java @@ -34,6 +34,7 @@ import org.elasticsearch.xpack.esql.expression.function.scalar.string.BitLength; import org.elasticsearch.xpack.esql.expression.function.scalar.string.Concat; import org.elasticsearch.xpack.esql.expression.function.scalar.string.EndsWith; +import org.elasticsearch.xpack.esql.expression.function.scalar.string.Hash; import org.elasticsearch.xpack.esql.expression.function.scalar.string.Left; import org.elasticsearch.xpack.esql.expression.function.scalar.string.Locate; import org.elasticsearch.xpack.esql.expression.function.scalar.string.Repeat; @@ -64,6 +65,7 @@ public static List getNamedWriteables() { entries.add(E.ENTRY); entries.add(EndsWith.ENTRY); entries.add(Greatest.ENTRY); + entries.add(Hash.ENTRY); entries.add(Hypot.ENTRY); entries.add(In.ENTRY); entries.add(InsensitiveEquals.ENTRY); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/FromBase64.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/FromBase64.java index 7f9d0d3f2e647..832c511a2dc50 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/FromBase64.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/FromBase64.java @@ -30,6 +30,7 @@ import java.util.Base64; import java.util.List; +import static org.elasticsearch.compute.ann.Fixed.Scope.THREAD_LOCAL; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.isString; import static org.elasticsearch.xpack.esql.core.type.DataType.KEYWORD; @@ -85,7 +86,7 @@ protected NodeInfo info() { } @Evaluator() - static BytesRef process(BytesRef field, @Fixed(includeInToString = false, build = true) BytesRefBuilder oScratch) { + static BytesRef process(BytesRef field, @Fixed(includeInToString = false, scope = THREAD_LOCAL) BytesRefBuilder oScratch) { byte[] bytes = new byte[field.length]; System.arraycopy(field.bytes, field.offset, bytes, 0, field.length); oScratch.grow(field.length); diff --git 
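The example body referenced by tag "bucketWithOffset" lives in the bucket csv-spec resource and is not part of this diff; a minimal sketch of the double compensation described above, assuming the employees test index with its hire_date field, could look like:

  // Shift the value forward by one hour before bucketing, then shift the resulting
  // bucket key back, so each yearly bucket starts one hour earlier than midnight:
  FROM employees
  | STATS c = COUNT(*) BY b = BUCKET(hire_date + 1 hour, 1 year) - 1 hour
  | SORT b

The two opposite adjustments are needed because, as the description explains, expressions inside STATS are split into virtual EVALs before and after the aggregation.
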
a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToBase64.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToBase64.java index c23cef31f32f5..e78968bb209b6 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToBase64.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToBase64.java @@ -30,6 +30,7 @@ import java.util.Base64; import java.util.List; +import static org.elasticsearch.compute.ann.Fixed.Scope.THREAD_LOCAL; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.isString; import static org.elasticsearch.xpack.esql.core.type.DataType.KEYWORD; @@ -78,7 +79,7 @@ protected NodeInfo info() { } @Evaluator(warnExceptions = { ArithmeticException.class }) - static BytesRef process(BytesRef field, @Fixed(includeInToString = false, build = true) BytesRefBuilder oScratch) { + static BytesRef process(BytesRef field, @Fixed(includeInToString = false, scope = THREAD_LOCAL) BytesRefBuilder oScratch) { int outLength = Math.multiplyExact(4, (Math.addExact(field.length, 2) / 3)); byte[] bytes = new byte[field.length]; System.arraycopy(field.bytes, field.offset, bytes, 0, field.length); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/ip/IpPrefix.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/ip/IpPrefix.java index 26e75e752f681..5fc61c5c07b58 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/ip/IpPrefix.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/ip/IpPrefix.java @@ -30,6 +30,7 @@ import java.util.Arrays; import java.util.List; +import static org.elasticsearch.compute.ann.Fixed.Scope.THREAD_LOCAL; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.FIRST; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.SECOND; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.THIRD; @@ -138,7 +139,7 @@ static BytesRef process( BytesRef ip, int prefixLengthV4, int prefixLengthV6, - @Fixed(includeInToString = false, build = true) BytesRef scratch + @Fixed(includeInToString = false, scope = THREAD_LOCAL) BytesRef scratch ) { if (prefixLengthV4 < 0 || prefixLengthV4 > 32) { throw new IllegalArgumentException("Prefix length v4 must be in range [0, 32], found " + prefixLengthV4); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPSeriesWeightedSum.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPSeriesWeightedSum.java index cf49607893aae..4dd447f938880 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPSeriesWeightedSum.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPSeriesWeightedSum.java @@ -33,6 +33,7 @@ import java.util.Arrays; import java.util.List; +import static org.elasticsearch.compute.ann.Fixed.Scope.THREAD_LOCAL; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.FIRST; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.SECOND; import 
static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.isFoldable; @@ -144,7 +145,7 @@ static void process( DoubleBlock.Builder builder, int position, DoubleBlock block, - @Fixed(includeInToString = false, build = true) CompensatedSum sum, + @Fixed(includeInToString = false, scope = THREAD_LOCAL) CompensatedSum sum, @Fixed double p ) { sum.reset(0, 0); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPercentile.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPercentile.java index f3a63c835bd34..4e4aee307f1c7 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPercentile.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPercentile.java @@ -35,6 +35,7 @@ import java.util.Arrays; import java.util.List; +import static org.elasticsearch.compute.ann.Fixed.Scope.THREAD_LOCAL; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.FIRST; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.SECOND; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.isType; @@ -167,7 +168,7 @@ static void process( int position, DoubleBlock values, double percentile, - @Fixed(includeInToString = false, build = true) DoubleSortingScratch scratch + @Fixed(includeInToString = false, scope = THREAD_LOCAL) DoubleSortingScratch scratch ) { int valueCount = values.getValueCount(position); int firstValueIndex = values.getFirstValueIndex(position); @@ -190,7 +191,7 @@ static void process( int position, IntBlock values, double percentile, - @Fixed(includeInToString = false, build = true) IntSortingScratch scratch + @Fixed(includeInToString = false, scope = THREAD_LOCAL) IntSortingScratch scratch ) { int valueCount = values.getValueCount(position); int firstValueIndex = values.getFirstValueIndex(position); @@ -213,7 +214,7 @@ static void process( int position, LongBlock values, double percentile, - @Fixed(includeInToString = false, build = true) LongSortingScratch scratch + @Fixed(includeInToString = false, scope = THREAD_LOCAL) LongSortingScratch scratch ) { int valueCount = values.getValueCount(position); int firstValueIndex = values.getFirstValueIndex(position); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Concat.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Concat.java index 46ecc9e026d3d..eb173029876d3 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Concat.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Concat.java @@ -32,6 +32,7 @@ import java.util.stream.Stream; import static org.elasticsearch.common.unit.ByteSizeUnit.MB; +import static org.elasticsearch.compute.ann.Fixed.Scope.THREAD_LOCAL; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.DEFAULT; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.isString; @@ -111,7 +112,7 @@ public ExpressionEvaluator.Factory toEvaluator(ToEvaluator toEvaluator) { } @Evaluator - static BytesRef process(@Fixed(includeInToString = false, build = true) BreakingBytesRefBuilder scratch, BytesRef[] values) { + static BytesRef 
process(@Fixed(includeInToString = false, scope = THREAD_LOCAL) BreakingBytesRefBuilder scratch, BytesRef[] values) { scratch.grow(checkedTotalLength(values)); scratch.clear(); for (int i = 0; i < values.length; i++) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Hash.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Hash.java new file mode 100644 index 0000000000000..99c5908699ec2 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Hash.java @@ -0,0 +1,217 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.string; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.compute.ann.Evaluator; +import org.elasticsearch.compute.ann.Fixed; +import org.elasticsearch.compute.operator.BreakingBytesRefBuilder; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.xpack.esql.core.InvalidArgumentException; +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.tree.NodeInfo; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; +import org.elasticsearch.xpack.esql.expression.function.Param; +import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; + +import java.io.IOException; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; +import java.util.List; +import java.util.function.Function; + +import static org.elasticsearch.compute.ann.Fixed.Scope.THREAD_LOCAL; +import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.FIRST; +import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.SECOND; +import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.isString; + +public class Hash extends EsqlScalarFunction { + + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "Hash", Hash::new); + + private final Expression algorithm; + private final Expression input; + + @FunctionInfo( + returnType = "keyword", + description = "Computes the hash of the input using various algorithms such as MD5, SHA, SHA-224, SHA-256, SHA-384, SHA-512." 
+ ) + public Hash( + Source source, + @Param(name = "algorithm", type = { "keyword", "text" }, description = "Hash algorithm to use.") Expression algorithm, + @Param(name = "input", type = { "keyword", "text" }, description = "Input to hash.") Expression input + ) { + super(source, List.of(algorithm, input)); + this.algorithm = algorithm; + this.input = input; + } + + private Hash(StreamInput in) throws IOException { + this(Source.readFrom((PlanStreamInput) in), in.readNamedWriteable(Expression.class), in.readNamedWriteable(Expression.class)); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + source().writeTo(out); + out.writeNamedWriteable(algorithm); + out.writeNamedWriteable(input); + } + + @Override + public String getWriteableName() { + return ENTRY.name; + } + + @Override + public DataType dataType() { + return DataType.KEYWORD; + } + + @Override + protected TypeResolution resolveType() { + if (childrenResolved() == false) { + return new TypeResolution("Unresolved children"); + } + + TypeResolution resolution = isString(algorithm, sourceText(), FIRST); + if (resolution.unresolved()) { + return resolution; + } + + return isString(input, sourceText(), SECOND); + } + + @Override + public boolean foldable() { + return algorithm.foldable() && input.foldable(); + } + + @Evaluator(warnExceptions = NoSuchAlgorithmException.class) + static BytesRef process( + @Fixed(includeInToString = false, scope = THREAD_LOCAL) BreakingBytesRefBuilder scratch, + BytesRef algorithm, + BytesRef input + ) throws NoSuchAlgorithmException { + return hash(scratch, MessageDigest.getInstance(algorithm.utf8ToString()), input); + } + + @Evaluator(extraName = "Constant") + static BytesRef processConstant( + @Fixed(includeInToString = false, scope = THREAD_LOCAL) BreakingBytesRefBuilder scratch, + @Fixed(scope = THREAD_LOCAL) HashFunction algorithm, + BytesRef input + ) { + return hash(scratch, algorithm.digest, input); + } + + private static BytesRef hash(BreakingBytesRefBuilder scratch, MessageDigest algorithm, BytesRef input) { + algorithm.reset(); + algorithm.update(input.bytes, input.offset, input.length); + var digest = algorithm.digest(); + scratch.clear(); + scratch.grow(digest.length * 2); + appendUtf8HexDigest(scratch, digest); + return scratch.bytesRefView(); + } + + private static final byte[] ASCII_HEX_BYTES = new byte[] { 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 97, 98, 99, 100, 101, 102 }; + + /** + * This function appends hex bytes directly to the {@link BreakingBytesRefBuilder}, + * bypassing unnecessary array allocations and byte array copying.
+ */ + private static void appendUtf8HexDigest(BreakingBytesRefBuilder scratch, byte[] bytes) { + for (byte b : bytes) { + scratch.append(ASCII_HEX_BYTES[b >> 4 & 0xf]); + scratch.append(ASCII_HEX_BYTES[b & 0xf]); + } + } + + @Override + public EvalOperator.ExpressionEvaluator.Factory toEvaluator(ToEvaluator toEvaluator) { + if (algorithm.foldable()) { + try { + // the hash function is created here to validate the algorithm before the evaluator is created + var hf = HashFunction.create((BytesRef) algorithm.fold()); + return new HashConstantEvaluator.Factory( + source(), + context -> new BreakingBytesRefBuilder(context.breaker(), "hash"), + new Function<>() { + @Override + public HashFunction apply(DriverContext context) { + return hf.copy(); + } + + @Override + public String toString() { + return hf.toString(); + } + }, + toEvaluator.apply(input) + ); + } catch (NoSuchAlgorithmException e) { + throw new InvalidArgumentException(e, "invalid algorithm for [{}]: {}", sourceText(), e.getMessage()); + } + } else { + return new HashEvaluator.Factory( + source(), + context -> new BreakingBytesRefBuilder(context.breaker(), "hash"), + toEvaluator.apply(algorithm), + toEvaluator.apply(input) + ); + } + } + + @Override + public Expression replaceChildren(List newChildren) { + return new Hash(source(), newChildren.get(0), newChildren.get(1)); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, Hash::new, children().get(0), children().get(1)); + } + + public record HashFunction(String algorithm, MessageDigest digest) { + + public static HashFunction create(BytesRef literal) throws NoSuchAlgorithmException { + var algorithm = literal.utf8ToString(); + var digest = MessageDigest.getInstance(algorithm); + return new HashFunction(algorithm, digest); + } + + public HashFunction copy() { + try { + return new HashFunction(algorithm, MessageDigest.getInstance(algorithm)); + } catch (NoSuchAlgorithmException e) { + assert false : "Algorithm should be valid at this point"; + throw new IllegalStateException(e); + } + } + + @Override + public String toString() { + return algorithm; + } + } + + Expression algorithm() { + return algorithm; + } + + Expression input() { + return input; + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Left.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Left.java index e7572caafd8f5..0d885e3f3c341 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Left.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Left.java @@ -30,6 +30,7 @@ import java.util.Arrays; import java.util.List; +import static org.elasticsearch.compute.ann.Fixed.Scope.THREAD_LOCAL; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.FIRST; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.SECOND; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.isString; @@ -77,8 +78,8 @@ public String getWriteableName() { @Evaluator static BytesRef process( - @Fixed(includeInToString = false, build = true) BytesRef out, - @Fixed(includeInToString = false, build = true) UnicodeUtil.UTF8CodePoint cp, + @Fixed(includeInToString = false, scope = THREAD_LOCAL) BytesRef out, + @Fixed(includeInToString = false, scope = THREAD_LOCAL) UnicodeUtil.UTF8CodePoint cp, BytesRef str,
int length ) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Repeat.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Repeat.java index 2cc14399df2ae..e91f03de3dd7e 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Repeat.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Repeat.java @@ -31,6 +31,7 @@ import java.util.List; import static org.elasticsearch.common.unit.ByteSizeUnit.MB; +import static org.elasticsearch.compute.ann.Fixed.Scope.THREAD_LOCAL; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.FIRST; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.SECOND; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.isString; @@ -101,7 +102,7 @@ public boolean foldable() { @Evaluator(extraName = "Constant", warnExceptions = { IllegalArgumentException.class }) static BytesRef processConstantNumber( - @Fixed(includeInToString = false, build = true) BreakingBytesRefBuilder scratch, + @Fixed(includeInToString = false, scope = THREAD_LOCAL) BreakingBytesRefBuilder scratch, BytesRef str, @Fixed int number ) { @@ -109,7 +110,11 @@ static BytesRef processConstantNumber( } @Evaluator(warnExceptions = { IllegalArgumentException.class }) - static BytesRef process(@Fixed(includeInToString = false, build = true) BreakingBytesRefBuilder scratch, BytesRef str, int number) { + static BytesRef process( + @Fixed(includeInToString = false, scope = THREAD_LOCAL) BreakingBytesRefBuilder scratch, + BytesRef str, + int number + ) { if (number < 0) { throw new IllegalArgumentException("Number parameter cannot be negative, found [" + number + "]"); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Right.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Right.java index b069b984ea81e..e0ebed29cca72 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Right.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Right.java @@ -30,6 +30,7 @@ import java.util.Arrays; import java.util.List; +import static org.elasticsearch.compute.ann.Fixed.Scope.THREAD_LOCAL; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.FIRST; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.SECOND; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.isString; @@ -77,8 +78,8 @@ public String getWriteableName() { @Evaluator static BytesRef process( - @Fixed(includeInToString = false, build = true) BytesRef out, - @Fixed(includeInToString = false, build = true) UnicodeUtil.UTF8CodePoint cp, + @Fixed(includeInToString = false, scope = THREAD_LOCAL) BytesRef out, + @Fixed(includeInToString = false, scope = THREAD_LOCAL) UnicodeUtil.UTF8CodePoint cp, BytesRef str, int length ) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Space.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Space.java index 6481ce5764e1f..3b9a466966911 100644 --- 
a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Space.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Space.java @@ -31,6 +31,7 @@ import java.util.List; import static org.elasticsearch.common.unit.ByteSizeUnit.MB; +import static org.elasticsearch.compute.ann.Fixed.Scope.THREAD_LOCAL; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.DEFAULT; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.isType; import static org.elasticsearch.xpack.esql.core.type.DataType.KEYWORD; @@ -82,7 +83,7 @@ protected TypeResolution resolveType() { } @Evaluator(warnExceptions = { IllegalArgumentException.class }) - static BytesRef process(@Fixed(includeInToString = false, build = true) BreakingBytesRefBuilder scratch, int number) { + static BytesRef process(@Fixed(includeInToString = false, scope = THREAD_LOCAL) BreakingBytesRefBuilder scratch, int number) { checkNumber(number); scratch.grow(number); scratch.setLength(number); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Split.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Split.java index b1f5da56d011b..24762122f755b 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Split.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Split.java @@ -29,6 +29,7 @@ import java.io.IOException; +import static org.elasticsearch.compute.ann.Fixed.Scope.THREAD_LOCAL; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.FIRST; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.SECOND; import static org.elasticsearch.xpack.esql.expression.EsqlTypeResolutions.isStringAndExact; @@ -110,7 +111,7 @@ static void process( BytesRefBlock.Builder builder, BytesRef str, @Fixed byte delim, - @Fixed(includeInToString = false, build = true) BytesRef scratch + @Fixed(includeInToString = false, scope = THREAD_LOCAL) BytesRef scratch ) { scratch.bytes = str.bytes; scratch.offset = str.offset; @@ -140,7 +141,7 @@ static void process( BytesRefBlock.Builder builder, BytesRef str, BytesRef delim, - @Fixed(includeInToString = false, build = true) BytesRef scratch + @Fixed(includeInToString = false, scope = THREAD_LOCAL) BytesRef scratch ) { checkDelimiter(delim); process(builder, str, delim.bytes[delim.offset], scratch); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/LogicalPlanBuilder.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/LogicalPlanBuilder.java index 24398afa18010..49d77bc36fb2e 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/LogicalPlanBuilder.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/LogicalPlanBuilder.java @@ -564,6 +564,11 @@ public PlanFactory visitJoinCommand(EsqlBaseParser.JoinCommandContext ctx) { } } + var matchFieldsCount = joinFields.size(); + if (matchFieldsCount > 1) { + throw new ParsingException(source, "JOIN ON clause only supports one field at the moment, found [{}]", matchFieldsCount); + } + return p -> new LookupJoin(source, p, right, joinFields); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/PlannerUtils.java 
b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/PlannerUtils.java index 37f89891860d8..a312d048db0ad 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/PlannerUtils.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/PlannerUtils.java @@ -22,6 +22,7 @@ import org.elasticsearch.xpack.esql.core.expression.AttributeSet; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.predicate.Predicates; +import org.elasticsearch.xpack.esql.core.tree.Node; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.core.util.Holder; @@ -40,6 +41,7 @@ import org.elasticsearch.xpack.esql.plan.physical.ExchangeSinkExec; import org.elasticsearch.xpack.esql.plan.physical.ExchangeSourceExec; import org.elasticsearch.xpack.esql.plan.physical.FragmentExec; +import org.elasticsearch.xpack.esql.plan.physical.LookupJoinExec; import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; import org.elasticsearch.xpack.esql.planner.mapper.LocalMapper; import org.elasticsearch.xpack.esql.planner.mapper.Mapper; @@ -48,9 +50,12 @@ import org.elasticsearch.xpack.esql.stats.SearchStats; import java.util.ArrayList; +import java.util.Collection; import java.util.LinkedHashSet; import java.util.List; import java.util.Set; +import java.util.function.Consumer; +import java.util.function.Function; import static java.util.Arrays.asList; import static org.elasticsearch.index.mapper.MappedFieldType.FieldExtractPreference.DOC_VALUES; @@ -105,10 +110,27 @@ public static Set planConcreteIndices(PhysicalPlan plan) { return Set.of(); } var indices = new LinkedHashSet(); - plan.forEachUp(FragmentExec.class, f -> f.fragment().forEachUp(EsRelation.class, r -> indices.addAll(r.index().concreteIndices()))); + // TODO: This only works for LEFT join, we still need to support RIGHT join + forEachUpWithChildren(plan, node -> { + if (node instanceof FragmentExec f) { + f.fragment().forEachUp(EsRelation.class, r -> indices.addAll(r.index().concreteIndices())); + } + }, node -> node instanceof LookupJoinExec join ? List.of(join.left()) : node.children()); return indices; } + /** + * Similar to {@link Node#forEachUp(Consumer)}, but with a custom callback to get the node children. + */ + private static > void forEachUpWithChildren( + T node, + Consumer action, + Function> childrenGetter + ) { + childrenGetter.apply(node).forEach(c -> forEachUpWithChildren(c, action, childrenGetter)); + action.accept(node); + } + /** * Returns the original indices specified in the FROM command of the query. We need the original query to resolve alias filters. 
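 * Note that {@code planConcreteIndices} above now walks only the left-hand side of a {@code LookupJoinExec}, so lookup indices * are no longer reported as concrete search indices, which is what lets ComputeService (the next file below) drop its lookup-index special-casing.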
*/ diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java index 9b59b98a7cdc2..e77a2443df2dd 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java @@ -63,12 +63,8 @@ import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.enrich.EnrichLookupService; import org.elasticsearch.xpack.esql.enrich.LookupFromIndexService; -import org.elasticsearch.xpack.esql.plan.logical.EsRelation; -import org.elasticsearch.xpack.esql.plan.logical.join.Join; import org.elasticsearch.xpack.esql.plan.physical.ExchangeSinkExec; import org.elasticsearch.xpack.esql.plan.physical.ExchangeSourceExec; -import org.elasticsearch.xpack.esql.plan.physical.FragmentExec; -import org.elasticsearch.xpack.esql.plan.physical.LookupJoinExec; import org.elasticsearch.xpack.esql.plan.physical.OutputExec; import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; import org.elasticsearch.xpack.esql.planner.EsPhysicalOperationProviders; @@ -81,7 +77,6 @@ import java.util.Arrays; import java.util.Collections; import java.util.HashMap; -import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; @@ -167,11 +162,9 @@ public void execute( Map clusterToConcreteIndices = transportService.getRemoteClusterService() .groupIndices(SearchRequest.DEFAULT_INDICES_OPTIONS, PlannerUtils.planConcreteIndices(physicalPlan).toArray(String[]::new)); QueryPragmas queryPragmas = configuration.pragmas(); - Set lookupIndexNames = findLookupIndexNames(physicalPlan); - Set concreteIndexNames = selectConcreteIndices(clusterToConcreteIndices, lookupIndexNames); if (dataNodePlan == null) { - if (concreteIndexNames.isEmpty() == false) { - String error = "expected no concrete indices without data node plan; got " + concreteIndexNames; + if (clusterToConcreteIndices.values().stream().allMatch(v -> v.indices().length == 0) == false) { + String error = "expected no concrete indices without data node plan; got " + clusterToConcreteIndices; assert false : error; listener.onFailure(new IllegalStateException(error)); return; @@ -194,7 +187,7 @@ public void execute( return; } } else { - if (concreteIndexNames.isEmpty()) { + if (clusterToConcreteIndices.values().stream().allMatch(v -> v.indices().length == 0)) { var error = "expected concrete indices with data node plan but got empty; data node plan " + dataNodePlan; assert false : error; listener.onFailure(new IllegalStateException(error)); @@ -268,42 +261,6 @@ public void execute( } } - private Set selectConcreteIndices(Map clusterToConcreteIndices, Set indexesToIgnore) { - Set concreteIndexNames = new HashSet<>(); - clusterToConcreteIndices.forEach((clusterAlias, concreteIndices) -> { - for (String index : concreteIndices.indices()) { - if (indexesToIgnore.contains(index) == false) { - concreteIndexNames.add(index); - } - } - }); - return concreteIndexNames; - } - - private Set findLookupIndexNames(PhysicalPlan physicalPlan) { - Set lookupIndexNames = new HashSet<>(); - // When planning JOIN on the coordinator node: "LookupJoinExec.lookup()->FragmentExec.fragment()->EsRelation.index()" - physicalPlan.forEachDown( - LookupJoinExec.class, - lookupJoinExec -> lookupJoinExec.lookup() - .forEachDown( - FragmentExec.class, - frag -> 
frag.fragment().forEachDown(EsRelation.class, esRelation -> lookupIndexNames.add(esRelation.index().name())) - ) - ); - // When planning JOIN on the data node: "FragmentExec.fragment()->Join.right()->EsRelation.index()" - // TODO this only works for LEFT join, so we still need to support RIGHT join - physicalPlan.forEachDown( - FragmentExec.class, - fragmentExec -> fragmentExec.fragment() - .forEachDown( - Join.class, - join -> join.right().forEachDown(EsRelation.class, esRelation -> lookupIndexNames.add(esRelation.index().name())) - ) - ); - return lookupIndexNames; - } - // For queries like: FROM logs* | LIMIT 0 (including cross-cluster LIMIT 0 queries) private static void updateShardCountForCoordinatorOnlyQuery(EsqlExecutionInfo execInfo) { if (execInfo.isCrossClusterSearch()) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlQueryAction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlQueryAction.java index 76bfb95d07926..50d5819688e46 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlQueryAction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlQueryAction.java @@ -12,6 +12,7 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.client.internal.Client; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; @@ -42,6 +43,7 @@ import org.elasticsearch.xpack.esql.execution.PlanExecutor; import org.elasticsearch.xpack.esql.session.Configuration; import org.elasticsearch.xpack.esql.session.EsqlSession.PlanRunner; +import org.elasticsearch.xpack.esql.session.QueryBuilderResolver; import org.elasticsearch.xpack.esql.session.Result; import java.io.IOException; @@ -68,6 +70,7 @@ public class TransportEsqlQueryAction extends HandledTransportAction asyncTaskManagementService; private final RemoteClusterService remoteClusterService; + private final QueryBuilderResolver queryBuilderResolver; @Inject @SuppressWarnings("this-escape") @@ -82,7 +85,8 @@ public TransportEsqlQueryAction( BigArrays bigArrays, BlockFactory blockFactory, Client client, - NamedWriteableRegistry registry + NamedWriteableRegistry registry, + IndexNameExpressionResolver indexNameExpressionResolver ) { // TODO replace SAME when removing workaround for https://github.com/elastic/elasticsearch/issues/97916 @@ -121,6 +125,7 @@ public TransportEsqlQueryAction( bigArrays ); this.remoteClusterService = transportService.getRemoteClusterService(); + this.queryBuilderResolver = new QueryBuilderResolver(searchService, clusterService, transportService, indexNameExpressionResolver); } @Override @@ -191,6 +196,7 @@ private void innerExecute(Task task, EsqlQueryRequest request, ActionListener toResponse(task, request, configuration, result)) ); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java index c0290fa2b1d73..bd3b3bdb3483c 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java @@ -113,6 +113,7 @@ public 
interface PlanRunner { private final PhysicalPlanOptimizer physicalPlanOptimizer; private final PlanningMetrics planningMetrics; private final IndicesExpressionGrouper indicesExpressionGrouper; + private final QueryBuilderResolver queryBuilderResolver; public EsqlSession( String sessionId, @@ -125,7 +126,8 @@ public EsqlSession( Mapper mapper, Verifier verifier, PlanningMetrics planningMetrics, - IndicesExpressionGrouper indicesExpressionGrouper + IndicesExpressionGrouper indicesExpressionGrouper, + QueryBuilderResolver queryBuilderResolver ) { this.sessionId = sessionId; this.configuration = configuration; @@ -139,6 +141,7 @@ public EsqlSession( this.physicalPlanOptimizer = new PhysicalPlanOptimizer(new PhysicalOptimizerContext(configuration)); this.planningMetrics = planningMetrics; this.indicesExpressionGrouper = indicesExpressionGrouper; + this.queryBuilderResolver = queryBuilderResolver; } public String sessionId() { @@ -158,7 +161,16 @@ public void execute(EsqlQueryRequest request, EsqlExecutionInfo executionInfo, P new EsqlSessionCCSUtils.CssPartialErrorsActionListener(executionInfo, listener) { @Override public void onResponse(LogicalPlan analyzedPlan) { - executeOptimizedPlan(request, executionInfo, planRunner, optimizedPlan(analyzedPlan), listener); + try { + var optimizedPlan = optimizedPlan(analyzedPlan); + queryBuilderResolver.resolveQueryBuilders( + optimizedPlan, + listener, + (newPlan, next) -> executeOptimizedPlan(request, executionInfo, planRunner, newPlan, next) + ); + } catch (Exception e) { + listener.onFailure(e); + } } } ); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/QueryBuilderResolver.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/QueryBuilderResolver.java new file mode 100644 index 0000000000000..b6424c5f7fa56 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/QueryBuilderResolver.java @@ -0,0 +1,178 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.session; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ResolvedIndices; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryRewriteContext; +import org.elasticsearch.index.query.Rewriteable; +import org.elasticsearch.search.SearchService; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.esql.action.EsqlCapabilities; +import org.elasticsearch.xpack.esql.core.util.Holder; +import org.elasticsearch.xpack.esql.expression.function.fulltext.FullTextFunction; +import org.elasticsearch.xpack.esql.plan.logical.EsRelation; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.esql.planner.PlannerUtils; + +import java.io.IOException; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; +import java.util.function.BiConsumer; + +/** + * Some {@link FullTextFunction} implementations such as {@link org.elasticsearch.xpack.esql.expression.function.fulltext.Match} + * will be translated to a {@link QueryBuilder} that requires a rewrite phase on the coordinator. + * {@link QueryBuilderResolver#resolveQueryBuilders(LogicalPlan, ActionListener, BiConsumer)} will rewrite the plan by replacing + * {@link FullTextFunction} expressions with new ones that hold rewritten {@link QueryBuilder}s. + */ +public class QueryBuilderResolver { + private final SearchService searchService; + private final ClusterService clusterService; + private final TransportService transportService; + private final IndexNameExpressionResolver indexNameExpressionResolver; + + public QueryBuilderResolver( + SearchService searchService, + ClusterService clusterService, + TransportService transportService, + IndexNameExpressionResolver indexNameExpressionResolver + ) { + this.searchService = searchService; + this.clusterService = clusterService; + this.transportService = transportService; + this.indexNameExpressionResolver = indexNameExpressionResolver; + } + + public void resolveQueryBuilders( + LogicalPlan plan, + ActionListener listener, + BiConsumer> callback + ) { + // TODO: remove once SEMANTIC_TEXT_TYPE is enabled outside of snapshots + if (false == EsqlCapabilities.Cap.SEMANTIC_TEXT_TYPE.isEnabled()) { + callback.accept(plan, listener); + return; + } + + if (plan.optimized() == false) { + listener.onFailure(new IllegalStateException("Expected optimized plan before query builder rewrite.")); + return; + } + + Set unresolved = fullTextFunctions(plan); + Set indexNames = indexNames(plan); + + if (indexNames == null || indexNames.isEmpty() || unresolved.isEmpty()) { + callback.accept(plan, listener); + return; + } + QueryRewriteContext ctx = queryRewriteContext(indexNames); + FullTextFunctionsRewritable rewritable = new FullTextFunctionsRewritable(unresolved); + Rewriteable.rewriteAndFetch(rewritable, ctx, new ActionListener() { + @Override + public void onResponse(FullTextFunctionsRewritable fullTextFunctionsRewritable) { + try { + LogicalPlan newPlan = planWithResolvedQueryBuilders(plan, fullTextFunctionsRewritable.results()); + callback.accept(newPlan, listener); + } catch (Exception e) { + onFailure(e); + } + } + + @Override + public void onFailure(Exception e) { + listener.onFailure(e); + } + }); + } + + private Set fullTextFunctions(LogicalPlan plan) { + Set
functions = new HashSet<>(); + plan.forEachExpressionDown(FullTextFunction.class, func -> functions.add(func)); + return functions; + } + + public Set indexNames(LogicalPlan plan) { + Holder> indexNames = new Holder<>(); + + plan.forEachDown(EsRelation.class, esRelation -> { indexNames.set(esRelation.index().concreteIndices()); }); + + return indexNames.get(); + } + + public LogicalPlan planWithResolvedQueryBuilders(LogicalPlan plan, Map newQueryBuilders) { + LogicalPlan newPlan = plan.transformExpressionsDown(FullTextFunction.class, m -> { + if (newQueryBuilders.containsKey(m)) { + return m.replaceQueryBuilder(newQueryBuilders.get(m)); + } + return m; + }); + // The given plan was already analyzed and optimized, so we set the resulting plan to optimized as well. + newPlan.setOptimized(); + return newPlan; + } + + private QueryRewriteContext queryRewriteContext(Set indexNames) { + ResolvedIndices resolvedIndices = ResolvedIndices.resolveWithIndexNamesAndOptions( + indexNames.toArray(String[]::new), + IndexResolver.FIELD_CAPS_INDICES_OPTIONS, + clusterService.state(), + indexNameExpressionResolver, + transportService.getRemoteClusterService(), + System.currentTimeMillis() + ); + + return searchService.getRewriteContext(() -> System.currentTimeMillis(), resolvedIndices, null); + } + + private class FullTextFunctionsRewritable implements Rewriteable { + + private final Map queryBuilderMap; + + FullTextFunctionsRewritable(Map queryBuilderMap) { + this.queryBuilderMap = queryBuilderMap; + } + + FullTextFunctionsRewritable(Set functions) { + this.queryBuilderMap = new HashMap<>(); + + for (FullTextFunction func : functions) { + queryBuilderMap.put(func, func.asQuery(PlannerUtils.TRANSLATOR_HANDLER).asBuilder()); + } + } + + @Override + public FullTextFunctionsRewritable rewrite(QueryRewriteContext ctx) throws IOException { + Map results = new HashMap<>(); + + boolean hasChanged = false; + for (FullTextFunction func : queryBuilderMap.keySet()) { + var initial = queryBuilderMap.get(func); + var rewritten = Rewriteable.rewrite(initial, ctx, false); + + if (rewritten.equals(initial) == false) { + hasChanged = true; + } + + results.put(func, rewritten); + } + + return hasChanged ?
new FullTextFunctionsRewritable(results) : this; + } + + public Map results() { + return queryBuilderMap; + } + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverter.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverter.java index 6ba2d8451f956..0847f71b1fb01 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverter.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverter.java @@ -13,6 +13,8 @@ import org.elasticsearch.common.logging.LoggerMessageFormat; import org.elasticsearch.common.lucene.BytesRefs; import org.elasticsearch.common.time.DateFormatter; +import org.elasticsearch.common.time.DateFormatters; +import org.elasticsearch.common.time.DateUtils; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.xpack.esql.core.InvalidArgumentException; import org.elasticsearch.xpack.esql.core.QlIllegalArgumentException; @@ -51,7 +53,6 @@ import java.time.Period; import java.time.ZoneId; import java.time.temporal.ChronoField; -import java.time.temporal.TemporalAccessor; import java.time.temporal.TemporalAmount; import java.util.List; import java.util.Locale; @@ -200,6 +201,9 @@ public static Converter converterFor(DataType from, DataType to) { if (to == DataType.DATETIME) { return EsqlConverter.STRING_TO_DATETIME; } + if (to == DATE_NANOS) { + return EsqlConverter.STRING_TO_DATE_NANOS; + } if (to == DataType.IP) { return EsqlConverter.STRING_TO_IP; } @@ -514,13 +518,12 @@ public static long dateTimeToLong(String dateTime, DateFormatter formatter) { } public static long dateNanosToLong(String dateNano) { - return dateNanosToLong(dateNano, DateFormatter.forPattern("strict_date_optional_time_nanos")); + return dateNanosToLong(dateNano, DEFAULT_DATE_NANOS_FORMATTER); } public static long dateNanosToLong(String dateNano, DateFormatter formatter) { - TemporalAccessor parsed = formatter.parse(dateNano); - long nanos = parsed.getLong(ChronoField.INSTANT_SECONDS) * 1_000_000_000 + parsed.getLong(ChronoField.NANO_OF_SECOND); - return nanos; + Instant parsed = DateFormatters.from(formatter.parse(dateNano)).toInstant(); + return DateUtils.toLong(parsed); } public static String dateTimeToString(long dateTime) { @@ -639,6 +642,7 @@ public enum EsqlConverter implements Converter { STRING_TO_TIME_DURATION(x -> EsqlDataTypeConverter.parseTemporalAmount(x, DataType.TIME_DURATION)), STRING_TO_CHRONO_FIELD(EsqlDataTypeConverter::stringToChrono), STRING_TO_DATETIME(x -> EsqlDataTypeConverter.dateTimeToLong((String) x)), + STRING_TO_DATE_NANOS(x -> EsqlDataTypeConverter.dateNanosToLong((String) x)), STRING_TO_IP(x -> EsqlDataTypeConverter.stringToIP((String) x)), STRING_TO_VERSION(x -> EsqlDataTypeConverter.stringToVersion((String) x)), STRING_TO_DOUBLE(x -> EsqlDataTypeConverter.stringToDouble((String) x)), diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java index 717ac7b5a62a7..e627f99322f08 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java @@ -263,7 +263,7 @@ public final void test() throws Throwable { ); assumeFalse( "lookup join disabled for csv tests", - testCase.requiredCapabilities.contains(EsqlCapabilities.Cap.JOIN_LOOKUP_V7.capabilityName()) + 
testCase.requiredCapabilities.contains(EsqlCapabilities.Cap.JOIN_LOOKUP_V8.capabilityName()) ); assumeFalse( "can't use TERM function in csv tests", @@ -322,13 +322,14 @@ private void doTest() throws Exception { } protected void assertResults(ExpectedResults expected, ActualResults actual, boolean ignoreOrder, Logger logger) { - CsvAssert.assertResults(expected, actual, ignoreOrder, logger); /* - * Comment the assertion above and enable the next two lines to see the results returned by ES without any assertions being done. + * Enable the next two lines to see the results returned by ES. * This is useful when creating a new test or trying to figure out what the actual results are. */ // CsvTestUtils.logMetaData(actual.columnNames(), actual.columnTypes(), LOGGER); // CsvTestUtils.logData(actual.values(), LOGGER); + + CsvAssert.assertResults(expected, actual, ignoreOrder, logger); } private static IndexResolution loadIndexResolution(String mappingName, String indexName, Map typeMapping) { @@ -445,7 +446,8 @@ private ActualResults executePlan(BigArrays bigArrays) throws Exception { mapper, TEST_VERIFIER, new PlanningMetrics(), - null + null, + EsqlTestUtils.MOCK_QUERY_BUILDER_RESOLVER ); TestPhysicalOperationProviders physicalOperationProviders = testOperationProviders(testDataset); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java index 35364089127cc..2deedb927331d 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java @@ -204,7 +204,7 @@ private Page randomPage(List columns) { case BOOLEAN -> ((BooleanBlock.Builder) builder).appendBoolean(randomBoolean()); case UNSUPPORTED -> ((BytesRefBlock.Builder) builder).appendNull(); // TODO - add a random instant thing here?
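// date_nanos is modeled as a non-negative long (nanoseconds since the epoch), hence the switch to randomNonNegativeLong() below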
- case DATE_NANOS -> ((LongBlock.Builder) builder).appendLong(randomLong()); + case DATE_NANOS -> ((LongBlock.Builder) builder).appendLong(randomNonNegativeLong()); case VERSION -> ((BytesRefBlock.Builder) builder).appendBytesRef(new Version(randomIdentifier()).toBytesRef()); case GEO_POINT -> ((BytesRefBlock.Builder) builder).appendBytesRef(GEO.asWkb(GeometryTestUtils.randomPoint())); case CARTESIAN_POINT -> ((BytesRefBlock.Builder) builder).appendBytesRef(CARTESIAN.asWkb(ShapeTestUtils.randomPoint())); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTestUtils.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTestUtils.java index 85dd36ba0aaa5..d4e786a9d9bb0 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTestUtils.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTestUtils.java @@ -46,6 +46,10 @@ public static Analyzer analyzer(IndexResolution indexResolution) { return analyzer(indexResolution, TEST_VERIFIER); } + public static Analyzer analyzer(IndexResolution indexResolution, Map lookupResolution) { + return analyzer(indexResolution, lookupResolution, TEST_VERIFIER); + } + public static Analyzer analyzer(IndexResolution indexResolution, Verifier verifier) { return new Analyzer( new AnalyzerContext( @@ -59,6 +63,19 @@ public static Analyzer analyzer(IndexResolution indexResolution, Verifier verifi ); } + public static Analyzer analyzer(IndexResolution indexResolution, Map lookupResolution, Verifier verifier) { + return new Analyzer( + new AnalyzerContext( + EsqlTestUtils.TEST_CFG, + new EsqlFunctionRegistry(), + indexResolution, + lookupResolution, + defaultEnrichResolution() + ), + verifier + ); + } + public static Analyzer analyzer(IndexResolution indexResolution, Verifier verifier, Configuration config) { return new Analyzer( new AnalyzerContext(config, new EsqlFunctionRegistry(), indexResolution, defaultLookupResolution(), defaultEnrichResolution()), @@ -111,7 +128,7 @@ public static IndexResolution loadMapping(String resource, String indexName, Ind } public static IndexResolution loadMapping(String resource, String indexName) { - EsIndex test = new EsIndex(indexName, EsqlTestUtils.loadMapping(resource)); + EsIndex test = new EsIndex(indexName, EsqlTestUtils.loadMapping(resource), Map.of(indexName, IndexMode.STANDARD)); return IndexResolution.valid(test); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java index 9c71f20dcde0e..5d1ff43dfe31b 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java @@ -2139,7 +2139,7 @@ public void testLookupMatchTypeWrong() { } public void testLookupJoinUnknownIndex() { - assumeTrue("requires LOOKUP JOIN capability", EsqlCapabilities.Cap.JOIN_LOOKUP_V7.isEnabled()); + assumeTrue("requires LOOKUP JOIN capability", EsqlCapabilities.Cap.JOIN_LOOKUP_V8.isEnabled()); String errorMessage = "Unknown index [foobar]"; IndexResolution missingLookupIndex = IndexResolution.invalid(errorMessage); @@ -2168,7 +2168,7 @@ public void testLookupJoinUnknownIndex() { } public void testLookupJoinUnknownField() { - assumeTrue("requires LOOKUP JOIN capability", EsqlCapabilities.Cap.JOIN_LOOKUP_V7.isEnabled()); + 
assumeTrue("requires LOOKUP JOIN capability", EsqlCapabilities.Cap.JOIN_LOOKUP_V8.isEnabled()); String query = "FROM test | LOOKUP JOIN languages_lookup ON last_name"; String errorMessage = "1:45: Unknown column [last_name] in right side of join"; @@ -2190,6 +2190,35 @@ public void testLookupJoinUnknownField() { assertThat(e.getMessage(), containsString(errorMessage3 + "right side of join")); } + public void testLookupJoinIndexMode() { + assumeTrue("requires LOOKUP JOIN capability", EsqlCapabilities.Cap.JOIN_LOOKUP_V8.isEnabled()); + + var indexResolution = AnalyzerTestUtils.expandedDefaultIndexResolution(); + var lookupResolution = AnalyzerTestUtils.defaultLookupResolution(); + var indexResolutionAsLookup = Map.of("test", indexResolution); + var lookupResolutionAsIndex = lookupResolution.get("languages_lookup"); + + analyze("FROM test | EVAL language_code = languages | LOOKUP JOIN languages_lookup ON language_code"); + analyze( + "FROM languages_lookup | LOOKUP JOIN languages_lookup ON language_code", + AnalyzerTestUtils.analyzer(lookupResolutionAsIndex, lookupResolution) + ); + + VerificationException e = expectThrows( + VerificationException.class, + () -> analyze( + "FROM languages_lookup | EVAL languages = language_code | LOOKUP JOIN test ON languages", + AnalyzerTestUtils.analyzer(lookupResolutionAsIndex, indexResolutionAsLookup) + ) + ); + assertThat(e.getMessage(), containsString("1:70: invalid [test] resolution in lookup mode to an index in [standard] mode")); + e = expectThrows( + VerificationException.class, + () -> analyze("FROM test | LOOKUP JOIN test ON languages", AnalyzerTestUtils.analyzer(indexResolution, indexResolutionAsLookup)) + ); + assertThat(e.getMessage(), containsString("1:25: invalid [test] resolution in lookup mode to an index in [standard] mode")); + } + public void testImplicitCasting() { var e = expectThrows(VerificationException.class, () -> analyze(""" from test | eval x = concat("2024", "-04", "-01") + 1 day diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/ParsingTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/ParsingTests.java index 205c8943d4e3c..549ddce03c206 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/ParsingTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/ParsingTests.java @@ -12,6 +12,7 @@ import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.json.JsonXContent; import org.elasticsearch.xpack.esql.LoadMapping; +import org.elasticsearch.xpack.esql.action.EsqlCapabilities; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.expression.function.EsqlFunctionRegistry; @@ -111,6 +112,46 @@ public void testTooBigQuery() { assertEquals("-1:-1: ESQL statement is too large [1000011 characters > 1000000]", error(query.toString())); } + public void testJoinOnConstant() { + assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V8.isEnabled()); + assertEquals( + "1:55: JOIN ON clause only supports fields at the moment, found [123]", + error("row languages = 1, gender = \"f\" | lookup join test on 123") + ); + assertEquals( + "1:55: JOIN ON clause only supports fields at the moment, found [\"abc\"]", + error("row languages = 1, gender = \"f\" | lookup join test on \"abc\"") + ); + assertEquals( + "1:55: JOIN ON clause only supports fields at the moment, 
found [false]", + error("row languages = 1, gender = \"f\" | lookup join test on false") + ); + } + + public void testJoinOnMultipleFields() { + assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V8.isEnabled()); + assertEquals( + "1:35: JOIN ON clause only supports one field at the moment, found [2]", + error("row languages = 1, gender = \"f\" | lookup join test on gender, languages") + ); + } + + public void testJoinTwiceOnTheSameField() { + assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V8.isEnabled()); + assertEquals( + "1:35: JOIN ON clause only supports one field at the moment, found [2]", + error("row languages = 1, gender = \"f\" | lookup join test on languages, languages") + ); + } + + public void testJoinTwiceOnTheSameField_TwoLookups() { + assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V8.isEnabled()); + assertEquals( + "1:80: JOIN ON clause only supports one field at the moment, found [2]", + error("row languages = 1, gender = \"f\" | lookup join test on languages | eval x = 1 | lookup join test on gender, gender") + ); + } + private String functionName(EsqlFunctionRegistry registry, Expression functionCall) { for (FunctionDefinition def : registry.listFunctions()) { if (functionCall.getClass().equals(def.clazz())) { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java index 182e87d1ab9dd..43d764ab2007d 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java @@ -1166,12 +1166,14 @@ public void testMatchInsideEval() throws Exception { public void testMatchFilter() throws Exception { assertEquals( "1:19: Invalid condition [first_name:\"Anna\" or starts_with(first_name, \"Anne\")]. " - + "[:] operator can't be used as part of an or condition", + + "Full text functions can be used in an OR condition, " + + "but only if just full text functions are used in the OR condition", error("from test | where first_name:\"Anna\" or starts_with(first_name, \"Anne\")") ); assertEquals( - "1:51: Invalid condition [first_name:\"Anna\" OR new_salary > 100]. " + "[:] operator can't be used as part of an or condition", + "1:51: Invalid condition [first_name:\"Anna\" OR new_salary > 100]. 
Full text functions can be" + + " used in an OR condition, but only if just full text functions are used in the OR condition", error("from test | eval new_salary = salary + 10 | where first_name:\"Anna\" OR new_salary > 100") ); } @@ -1409,48 +1411,56 @@ public void testMatchOperatorWithDisjunctions() { } private void checkWithDisjunctions(String functionName, String functionInvocation, String functionType) { + String expression = functionInvocation + " or length(first_name) > 12"; + checkDisjunctionError("1:19", expression, functionName, functionType); + expression = "(" + functionInvocation + " or first_name is not null) or (length(first_name) > 12 and match(last_name, \"Smith\"))"; + checkDisjunctionError("1:19", expression, functionName, functionType); + expression = functionInvocation + " or (last_name is not null and first_name is null)"; + checkDisjunctionError("1:19", expression, functionName, functionType); + } + + private void checkDisjunctionError(String position, String expression, String functionName, String functionType) { assertEquals( LoggerMessageFormat.format( null, - "1:19: Invalid condition [{} or length(first_name) > 12]. " - + "[{}] " - + functionType - + " can't be used as part of an or condition", - functionInvocation, - functionName - ), - error("from test | where " + functionInvocation + " or length(first_name) > 12") - ); - assertEquals( - LoggerMessageFormat.format( - null, - "1:19: Invalid condition [({} and first_name is not null) or (length(first_name) > 12 and first_name is null)]. " - + "[{}] " - + functionType - + " can't be used as part of an or condition", - functionInvocation, - functionName - ), - error( - "from test | where (" - + functionInvocation - + " and first_name is not null) or (length(first_name) > 12 and first_name is null)" - ) - ); - assertEquals( - LoggerMessageFormat.format( - null, - "1:19: Invalid condition [({} and first_name is not null) or first_name is null]. " - + "[{}] " - + functionType - + " can't be used as part of an or condition", - functionInvocation, - functionName + "{}: Invalid condition [{}]. 
Full text functions can be used in an OR condition, " + + "but only if just full text functions are used in the OR condition", + position, + expression ), - error("from test | where (" + functionInvocation + " and first_name is not null) or first_name is null") + error("from test | where " + expression) + ); + } + + public void testFullTextFunctionsDisjunctions() { + checkWithFullTextFunctionsDisjunctions("MATCH", "match(last_name, \"Smith\")", "function"); + checkWithFullTextFunctionsDisjunctions(":", "last_name : \"Smith\"", "operator"); + checkWithFullTextFunctionsDisjunctions("QSTR", "qstr(\"last_name: Smith\")", "function"); + + assumeTrue("KQL function capability not available", EsqlCapabilities.Cap.KQL_FUNCTION.isEnabled()); + checkWithFullTextFunctionsDisjunctions("KQL", "kql(\"last_name: Smith\")", "function"); + } + + private void checkWithFullTextFunctionsDisjunctions(String functionName, String functionInvocation, String functionType) { + + String expression = functionInvocation + " or length(first_name) > 10"; + checkDisjunctionError("1:19", expression, functionName, functionType); + + expression = "match(last_name, \"Anneke\") or (" + functionInvocation + " and length(first_name) > 10)"; + checkDisjunctionError("1:19", expression, functionName, functionType); + + expression = "(" + + functionInvocation + + " and length(first_name) > 0) or (match(last_name, \"Anneke\") and length(first_name) > 10)"; + checkDisjunctionError("1:19", expression, functionName, functionType); + + query("from test | where " + functionInvocation + " or match(first_name, \"Anna\")"); + query("from test | where " + functionInvocation + " or not match(first_name, \"Anna\")"); + query("from test | where (" + functionInvocation + " or match(first_name, \"Anna\")) and length(first_name) > 10"); + query("from test | where (" + functionInvocation + " or match(first_name, \"Anna\")) and match(last_name, \"Smith\")"); + query("from test | where " + functionInvocation + " or (match(first_name, \"Anna\") and match(last_name, \"Smith\"))"); + } + public void testQueryStringFunctionWithNonBooleanFunctions() { checkFullTextFunctionsWithNonBooleanFunctions("QSTR", "qstr(\"first_name: Anna\")", "function"); } @@ -1964,7 +1974,7 @@ public void testSortByAggregate() { } public void testLookupJoinDataTypeMismatch() { - assumeTrue("requires LOOKUP JOIN capability", EsqlCapabilities.Cap.JOIN_LOOKUP_V7.isEnabled()); + assumeTrue("requires LOOKUP JOIN capability", EsqlCapabilities.Cap.JOIN_LOOKUP_V8.isEnabled()); query("FROM test | EVAL language_code = languages | LOOKUP JOIN languages_lookup ON language_code"); @@ -1975,7 +1985,11 @@ public void testLookupJoinDataTypeMismatch() { } private void query(String query) { - defaultAnalyzer.analyze(parser.createStatement(query)); + query(query, defaultAnalyzer); + } + + private void query(String query, Analyzer analyzer) { + analyzer.analyze(parser.createStatement(query)); } private String error(String query) { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/AbstractExpressionSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/AbstractExpressionSerializationTests.java index 6dd0c5fe88afd..050293e58c19d 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/AbstractExpressionSerializationTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/AbstractExpressionSerializationTests.java @@ -14,10 +14,15 @@ import
org.elasticsearch.xpack.esql.plan.AbstractNodeSerializationTests; public abstract class AbstractExpressionSerializationTests extends AbstractNodeSerializationTests { + public static Expression randomChild() { return ReferenceAttributeTests.randomReferenceAttribute(false); } + public static Expression mutateExpression(Expression expression) { + return randomValueOtherThan(expression, AbstractExpressionSerializationTests::randomChild); + } + @Override protected final NamedWriteableRegistry getNamedWriteableRegistry() { return new NamedWriteableRegistry(ExpressionWritables.getNamedWriteables()); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/HashSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/HashSerializationTests.java new file mode 100644 index 0000000000000..f21105c2c8bca --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/HashSerializationTests.java @@ -0,0 +1,27 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.string; + +import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests; + +import java.io.IOException; + +public class HashSerializationTests extends AbstractExpressionSerializationTests { + + @Override + protected Hash createTestInstance() { + return new Hash(randomSource(), randomChild(), randomChild()); + } + + @Override + protected Hash mutateInstance(Hash instance) throws IOException { + return randomBoolean() + ? new Hash(instance.source(), mutateExpression(instance.algorithm()), instance.input()) + : new Hash(instance.source(), instance.algorithm(), mutateExpression(instance.input())); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/HashStaticTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/HashStaticTests.java new file mode 100644 index 0000000000000..871bec7c06804 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/HashStaticTests.java @@ -0,0 +1,66 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.string; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.breaker.CircuitBreaker; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.MockBigArrays; +import org.elasticsearch.common.util.PageCacheRecycler; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.esql.core.InvalidArgumentException; +import org.elasticsearch.xpack.esql.core.expression.Literal; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.core.type.DataType; +import org.junit.After; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +import static org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase.evaluator; +import static org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase.field; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.startsWith; + +public class HashStaticTests extends ESTestCase { + + public void testInvalidAlgorithmLiteral() { + Source source = new Source(0, 0, "hash(\"invalid\", input)"); + DriverContext driverContext = driverContext(); + InvalidArgumentException e = expectThrows( + InvalidArgumentException.class, + () -> evaluator( + new Hash(source, new Literal(source, new BytesRef("invalid"), DataType.KEYWORD), field("input", DataType.KEYWORD)) + ).get(driverContext) + ); + assertThat(e.getMessage(), startsWith("invalid algorithm for [hash(\"invalid\", input)]: invalid MessageDigest not available")); + } + + /** + * The following fields and methods were borrowed from AbstractScalarFunctionTestCase + */ + private final List breakers = Collections.synchronizedList(new ArrayList<>()); + + private DriverContext driverContext() { + BigArrays bigArrays = new MockBigArrays(PageCacheRecycler.NON_RECYCLING_INSTANCE, ByteSizeValue.ofMb(256)).withCircuitBreaking(); + CircuitBreaker breaker = bigArrays.breakerService().getBreaker(CircuitBreaker.REQUEST); + breakers.add(breaker); + return new DriverContext(bigArrays, new BlockFactory(breaker, bigArrays)); + } + + @After + public void allMemoryReleased() { + for (CircuitBreaker breaker : breakers) { + assertThat(breaker.getUsed(), equalTo(0L)); + } + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/HashTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/HashTests.java new file mode 100644 index 0000000000000..c5cdf97eccd17 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/HashTests.java @@ -0,0 +1,107 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0.
+ */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.string; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.lucene.BytesRefs; +import org.elasticsearch.xpack.esql.core.InvalidArgumentException; +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; + +import java.nio.charset.StandardCharsets; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; +import java.util.ArrayList; +import java.util.HexFormat; +import java.util.List; +import java.util.function.Supplier; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; + +public class HashTests extends AbstractScalarFunctionTestCase { + + public HashTests(@Name("TestCase") Supplier testCaseSupplier) { + this.testCase = testCaseSupplier.get(); + } + + @ParametersFactory + public static Iterable parameters() { + List cases = new ArrayList<>(); + for (String algorithm : List.of("MD5", "SHA", "SHA-224", "SHA-256", "SHA-384", "SHA-512")) { + cases.addAll(createTestCases(algorithm)); + } + cases.add(new TestCaseSupplier("Invalid algorithm", List.of(DataType.KEYWORD, DataType.KEYWORD), () -> { + var input = randomAlphaOfLength(10); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(new BytesRef("invalid"), DataType.KEYWORD, "algorithm"), + new TestCaseSupplier.TypedData(new BytesRef(input), DataType.KEYWORD, "input") + ), + "HashEvaluator[algorithm=Attribute[channel=0], input=Attribute[channel=1]]", + DataType.KEYWORD, + is(nullValue()) + ).withWarning("Line -1:-1: evaluation of [] failed, treating result as null. Only first 20 failures recorded.") + .withWarning("Line -1:-1: java.security.NoSuchAlgorithmException: invalid MessageDigest not available") + .withFoldingException(InvalidArgumentException.class, "invalid algorithm for []: invalid MessageDigest not available"); + })); + return parameterSuppliersFromTypedDataWithDefaultChecks(true, cases, (v, p) -> "string"); + } + + private static List createTestCases(String algorithm) { + return List.of( + createTestCase(algorithm, false, DataType.KEYWORD, DataType.KEYWORD), + createTestCase(algorithm, false, DataType.KEYWORD, DataType.TEXT), + createTestCase(algorithm, false, DataType.TEXT, DataType.KEYWORD), + createTestCase(algorithm, false, DataType.TEXT, DataType.TEXT), + createTestCase(algorithm, true, DataType.KEYWORD, DataType.KEYWORD), + createTestCase(algorithm, true, DataType.KEYWORD, DataType.TEXT), + createTestCase(algorithm, true, DataType.TEXT, DataType.KEYWORD), + createTestCase(algorithm, true, DataType.TEXT, DataType.TEXT) + ); + } + + private static TestCaseSupplier createTestCase(String algorithm, boolean forceLiteral, DataType algorithmType, DataType inputType) { + return new TestCaseSupplier(algorithm, List.of(algorithmType, inputType), () -> { + var input = randomFrom(TestCaseSupplier.stringCases(inputType)).get(); + return new TestCaseSupplier.TestCase( + List.of(createTypedData(algorithm, forceLiteral, algorithmType, "algorithm"), input), + forceLiteral + ? 
"HashConstantEvaluator[algorithm=" + algorithm + ", input=Attribute[channel=0]]" + : "HashEvaluator[algorithm=Attribute[channel=0], input=Attribute[channel=1]]", + DataType.KEYWORD, + equalTo(new BytesRef(hash(algorithm, BytesRefs.toString(input.data())))) + ); + }); + } + + private static TestCaseSupplier.TypedData createTypedData(String value, boolean forceLiteral, DataType type, String name) { + var data = new TestCaseSupplier.TypedData(new BytesRef(value), type, name); + return forceLiteral ? data.forceLiteral() : data; + } + + private static String hash(String algorithm, String input) { + try { + return HexFormat.of().formatHex(MessageDigest.getInstance(algorithm).digest(input.getBytes(StandardCharsets.UTF_8))); + } catch (NoSuchAlgorithmException e) { + throw new IllegalArgumentException("Unknown algorithm: " + algorithm); + } + } + + @Override + protected Expression build(Source source, List args) { + return new Hash(source, args.get(0), args.get(1)); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizerTests.java index 879a413615202..928c849b847d5 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizerTests.java @@ -21,6 +21,7 @@ import org.elasticsearch.index.query.MatchQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.index.query.QueryStringQueryBuilder; import org.elasticsearch.index.query.RangeQueryBuilder; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.license.XPackLicenseState; @@ -57,6 +58,7 @@ import org.elasticsearch.xpack.esql.plan.physical.EvalExec; import org.elasticsearch.xpack.esql.plan.physical.ExchangeExec; import org.elasticsearch.xpack.esql.plan.physical.FieldExtractExec; +import org.elasticsearch.xpack.esql.plan.physical.FilterExec; import org.elasticsearch.xpack.esql.plan.physical.LimitExec; import org.elasticsearch.xpack.esql.plan.physical.LocalSourceExec; import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; @@ -1448,6 +1450,11 @@ private void checkMatchFunctionPushDown( var analyzer = makeAnalyzer("mapping-all-types.json"); // Check for every possible query data type for (DataType fieldDataType : fieldDataTypes) { + // TODO: semantic_text is not present in mapping-all-types.json so we skip it for now + if (fieldDataType == DataType.SEMANTIC_TEXT) { + continue; + } + var queryValue = randomQueryValue(fieldDataType); String fieldName = fieldDataType == DataType.DATETIME ? 
"date" : fieldDataType.name().toLowerCase(Locale.ROOT); @@ -1481,6 +1488,7 @@ private static Object randomQueryValue(DataType dataType) { case KEYWORD -> randomAlphaOfLength(5); case IP -> NetworkAddress.format(randomIp(randomBoolean())); case TEXT -> randomAlphaOfLength(50); + case SEMANTIC_TEXT -> randomAlphaOfLength(5); case VERSION -> VersionUtils.randomVersion(random()).toString(); default -> throw new IllegalArgumentException("Unexpected type: " + dataType); }; @@ -1543,6 +1551,46 @@ public void testMultipleMatchFilterPushdown() { assertThat(actualLuceneQuery.toString(), is(expectedLuceneQuery.toString())); } + public void testFullTextFunctionsDisjunctionPushdown() { + String query = """ + from test + | where (match(first_name, "Anna") or qstr("first_name: Anneke")) and last_name: "Smith" + | sort emp_no + """; + var plan = plannerOptimizer.plan(query); + var topNExec = as(plan, TopNExec.class); + var exchange = as(topNExec.child(), ExchangeExec.class); + var project = as(exchange.child(), ProjectExec.class); + var fieldExtract = as(project.child(), FieldExtractExec.class); + var actualLuceneQuery = as(fieldExtract.child(), EsQueryExec.class).query(); + var expectedLuceneQuery = new BoolQueryBuilder().must( + new BoolQueryBuilder().should(new MatchQueryBuilder("first_name", "Anna").lenient(true)) + .should(new QueryStringQueryBuilder("first_name: Anneke")) + ).must(new MatchQueryBuilder("last_name", "Smith").lenient(true)); + assertThat(actualLuceneQuery.toString(), is(expectedLuceneQuery.toString())); + } + + public void testFullTextFunctionsDisjunctionWithFiltersPushdown() { + String query = """ + from test + | where (first_name:"Anna" or first_name:"Anneke") and length(last_name) > 5 + | sort emp_no + """; + var plan = plannerOptimizer.plan(query); + var topNExec = as(plan, TopNExec.class); + var exchange = as(topNExec.child(), ExchangeExec.class); + var project = as(exchange.child(), ProjectExec.class); + var fieldExtract = as(project.child(), FieldExtractExec.class); + var secondTopNExec = as(fieldExtract.child(), TopNExec.class); + var secondFieldExtract = as(secondTopNExec.child(), FieldExtractExec.class); + var filterExec = as(secondFieldExtract.child(), FilterExec.class); + var thirdFilterExtract = as(filterExec.child(), FieldExtractExec.class); + var actualLuceneQuery = as(thirdFilterExtract.child(), EsQueryExec.class).query(); + var expectedLuceneQuery = new BoolQueryBuilder().should(new MatchQueryBuilder("first_name", "Anna").lenient(true)) + .should(new MatchQueryBuilder("first_name", "Anneke").lenient(true)); + assertThat(actualLuceneQuery.toString(), is(expectedLuceneQuery.toString())); + } + /** * Expecting * LimitExec[1000[INTEGER]] diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java index cfb993a7dd73d..17e158f088fb3 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java @@ -4906,7 +4906,7 @@ public void testPlanSanityCheck() throws Exception { } public void testPlanSanityCheckWithBinaryPlans() throws Exception { - assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V7.isEnabled()); + assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V8.isEnabled()); var plan = optimizedPlan(""" FROM test @@ -5911,7 
+5911,7 @@ public void testLookupStats() { * \_EsRelation[languages_lookup][LOOKUP][language_code{f}#18, language_name{f}#19] */ public void testLookupJoinPushDownFilterOnJoinKeyWithRename() { - assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V7.isEnabled()); + assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V8.isEnabled()); String query = """ FROM test @@ -5954,7 +5954,7 @@ public void testLookupJoinPushDownFilterOnJoinKeyWithRename() { * \_EsRelation[languages_lookup][LOOKUP][language_code{f}#18, language_name{f}#19] */ public void testLookupJoinPushDownFilterOnLeftSideField() { - assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V7.isEnabled()); + assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V8.isEnabled()); String query = """ FROM test @@ -5998,7 +5998,7 @@ public void testLookupJoinPushDownFilterOnLeftSideField() { * \_EsRelation[languages_lookup][LOOKUP][language_code{f}#18, language_name{f}#19] */ public void testLookupJoinPushDownDisabledForLookupField() { - assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V7.isEnabled()); + assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V8.isEnabled()); String query = """ FROM test @@ -6043,7 +6043,7 @@ public void testLookupJoinPushDownDisabledForLookupField() { * \_EsRelation[languages_lookup][LOOKUP][language_code{f}#19, language_name{f}#20] */ public void testLookupJoinPushDownSeparatedForConjunctionBetweenLeftAndRightField() { - assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V7.isEnabled()); + assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V8.isEnabled()); String query = """ FROM test @@ -6096,7 +6096,7 @@ public void testLookupJoinPushDownSeparatedForConjunctionBetweenLeftAndRightFiel * \_EsRelation[languages_lookup][LOOKUP][language_code{f}#19, language_name{f}#20] */ public void testLookupJoinPushDownDisabledForDisjunctionBetweenLeftAndRightField() { - assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V7.isEnabled()); + assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V8.isEnabled()); String query = """ FROM test diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java index 964dd4642d7c2..c7bb6e49703ed 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java @@ -2331,7 +2331,7 @@ public void testVerifierOnMissingReferences() { } public void testVerifierOnMissingReferencesWithBinaryPlans() throws Exception { - assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V7.isEnabled()); + assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V8.isEnabled()); // Do not assert serialization: // This will have a LookupJoinExec, which is not serializable because it doesn't leave the coordinator. 
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/IndexResolverFieldNamesTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/IndexResolverFieldNamesTests.java index 31ec4663738f7..60bdf4e7f73d3 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/IndexResolverFieldNamesTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/IndexResolverFieldNamesTests.java @@ -1365,7 +1365,7 @@ public void testMetrics() { } public void testLookupJoin() { - assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V7.isEnabled()); + assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V8.isEnabled()); assertFieldNames( "FROM employees | KEEP languages | RENAME languages AS language_code | LOOKUP JOIN languages_lookup ON language_code", Set.of("languages", "languages.*", "language_code", "language_code.*"), @@ -1374,7 +1374,7 @@ public void testLookupJoin() { } public void testLookupJoinKeep() { - assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V7.isEnabled()); + assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V8.isEnabled()); assertFieldNames( """ FROM employees @@ -1388,7 +1388,7 @@ public void testLookupJoinKeep() { } public void testLookupJoinKeepWildcard() { - assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V7.isEnabled()); + assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V8.isEnabled()); assertFieldNames( """ FROM employees @@ -1402,7 +1402,7 @@ public void testLookupJoinKeepWildcard() { } public void testMultiLookupJoin() { - assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V7.isEnabled()); + assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V8.isEnabled()); assertFieldNames( """ FROM sample_data @@ -1415,7 +1415,7 @@ public void testMultiLookupJoin() { } public void testMultiLookupJoinKeepBefore() { - assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V7.isEnabled()); + assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V8.isEnabled()); assertFieldNames( """ FROM sample_data @@ -1429,7 +1429,7 @@ public void testMultiLookupJoinKeepBefore() { } public void testMultiLookupJoinKeepBetween() { - assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V7.isEnabled()); + assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V8.isEnabled()); assertFieldNames( """ FROM sample_data @@ -1454,7 +1454,7 @@ public void testMultiLookupJoinKeepBetween() { } public void testMultiLookupJoinKeepAfter() { - assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V7.isEnabled()); + assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V8.isEnabled()); assertFieldNames( """ FROM sample_data @@ -1481,7 +1481,7 @@ public void testMultiLookupJoinKeepAfter() { } public void testMultiLookupJoinKeepAfterWildcard() { - assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V7.isEnabled()); + assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V8.isEnabled()); assertFieldNames( """ FROM sample_data @@ -1495,7 +1495,7 @@ public void testMultiLookupJoinKeepAfterWildcard() 
{ } public void testMultiLookupJoinSameIndex() { - assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V7.isEnabled()); + assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V8.isEnabled()); assertFieldNames( """ FROM sample_data @@ -1509,7 +1509,7 @@ public void testMultiLookupJoinSameIndex() { } public void testMultiLookupJoinSameIndexKeepBefore() { - assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V7.isEnabled()); + assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V8.isEnabled()); assertFieldNames( """ FROM sample_data @@ -1524,7 +1524,7 @@ public void testMultiLookupJoinSameIndexKeepBefore() { } public void testMultiLookupJoinSameIndexKeepBetween() { - assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V7.isEnabled()); + assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V8.isEnabled()); assertFieldNames( """ FROM sample_data @@ -1550,7 +1550,7 @@ public void testMultiLookupJoinSameIndexKeepBetween() { } public void testMultiLookupJoinSameIndexKeepAfter() { - assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V7.isEnabled()); + assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V8.isEnabled()); assertFieldNames( """ FROM sample_data diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/stats/PlanExecutorMetricsTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/stats/PlanExecutorMetricsTests.java index b323efad2b4c3..539cd0314a4d1 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/stats/PlanExecutorMetricsTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/stats/PlanExecutorMetricsTests.java @@ -123,6 +123,7 @@ public void testFailedMetric() { new EsqlExecutionInfo(randomBoolean()), groupIndicesByCluster, runPhase, + EsqlTestUtils.MOCK_QUERY_BUILDER_RESOLVER, new ActionListener<>() { @Override public void onResponse(Result result) { @@ -152,6 +153,7 @@ public void onFailure(Exception e) { new EsqlExecutionInfo(randomBoolean()), groupIndicesByCluster, runPhase, + EsqlTestUtils.MOCK_QUERY_BUILDER_RESOLVER, new ActionListener<>() { @Override public void onResponse(Result result) {} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverterTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverterTests.java index 8a57dfa968ccd..9a30c2281d742 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverterTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverterTests.java @@ -7,9 +7,11 @@ package org.elasticsearch.xpack.esql.type; +import org.elasticsearch.common.time.DateUtils; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.esql.core.type.DataType; +import java.time.Instant; import java.util.Arrays; import java.util.List; @@ -50,11 +52,19 @@ public class EsqlDataTypeConverterTests extends ESTestCase { public void testNanoTimeToString() { - long expected = randomLong(); + long expected = randomNonNegativeLong(); long actual = EsqlDataTypeConverter.dateNanosToLong(EsqlDataTypeConverter.nanoTimeToString(expected)); assertEquals(expected, actual); } + public void testStringToDateNanos() { + assertEquals( + 
DateUtils.toLong(Instant.parse("2023-01-01T00:00:00.000Z")), + EsqlDataTypeConverter.convert("2023-01-01T00:00:00.000000000", DATE_NANOS) + ); + assertEquals(DateUtils.toLong(Instant.parse("2023-01-01T00:00:00.000Z")), EsqlDataTypeConverter.convert("2023-01-01", DATE_NANOS)); + } + public void testCommonTypeNull() { for (DataType dataType : DataType.values()) { assertEqualsCommonType(dataType, NULL, dataType); diff --git a/x-pack/plugin/inference/src/internalClusterTest/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilterIT.java b/x-pack/plugin/inference/src/internalClusterTest/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilterIT.java index c7b3a9d42f579..3b0fc869c8124 100644 --- a/x-pack/plugin/inference/src/internalClusterTest/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilterIT.java +++ b/x-pack/plugin/inference/src/internalClusterTest/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilterIT.java @@ -22,7 +22,6 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.xpack.inference.LocalStateInferencePlugin; import org.elasticsearch.xpack.inference.Utils; import org.elasticsearch.xpack.inference.mock.TestDenseInferenceServiceExtension; import org.elasticsearch.xpack.inference.mock.TestSparseInferenceServiceExtension; @@ -59,7 +58,7 @@ public void setup() throws Exception { @Override protected Collection<Class<? extends Plugin>> nodePlugins() { - return Arrays.asList(LocalStateInferencePlugin.class); + return Arrays.asList(Utils.TestInferencePlugin.class); } public void testBulkOperations() throws Exception { diff --git a/x-pack/plugin/inference/src/internalClusterTest/java/org/elasticsearch/xpack/inference/integration/ModelRegistryIT.java b/x-pack/plugin/inference/src/internalClusterTest/java/org/elasticsearch/xpack/inference/integration/ModelRegistryIT.java index d5c156d1d4f46..be6b3725b0f35 100644 --- a/x-pack/plugin/inference/src/internalClusterTest/java/org/elasticsearch/xpack/inference/integration/ModelRegistryIT.java +++ b/x-pack/plugin/inference/src/internalClusterTest/java/org/elasticsearch/xpack/inference/integration/ModelRegistryIT.java @@ -31,7 +31,7 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xpack.inference.LocalStateInferencePlugin; +import org.elasticsearch.xpack.inference.InferencePlugin; import org.elasticsearch.xpack.inference.chunking.ChunkingSettingsTests; import org.elasticsearch.xpack.inference.registry.ModelRegistry; import org.elasticsearch.xpack.inference.services.elasticsearch.ElasticsearchInternalModel; @@ -76,7 +76,7 @@ public void createComponents() { @Override protected Collection<Class<? extends Plugin>> getPlugins() { - return pluginList(ReindexPlugin.class, LocalStateInferencePlugin.class); + return pluginList(ReindexPlugin.class, InferencePlugin.class); } public void testStoreModel() throws Exception { diff --git a/x-pack/plugin/inference/src/main/java/module-info.java b/x-pack/plugin/inference/src/main/java/module-info.java index 1c2240e8c5217..53974657e4e23 100644 --- a/x-pack/plugin/inference/src/main/java/module-info.java +++ b/x-pack/plugin/inference/src/main/java/module-info.java @@ -34,7 +34,6 @@ requires software.amazon.awssdk.retries.api; requires org.reactivestreams; requires org.elasticsearch.logging; - requires
org.elasticsearch.sslconfig; exports org.elasticsearch.xpack.inference.action; exports org.elasticsearch.xpack.inference.registry; diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceFeatures.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceFeatures.java index 3b7613b8b0e1f..876ff01812064 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceFeatures.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceFeatures.java @@ -10,13 +10,15 @@ import org.elasticsearch.features.FeatureSpecification; import org.elasticsearch.features.NodeFeature; import org.elasticsearch.xpack.inference.mapper.SemanticTextFieldMapper; -import org.elasticsearch.xpack.inference.queries.SemanticMatchQueryRewriteInterceptor; import org.elasticsearch.xpack.inference.queries.SemanticQueryBuilder; import org.elasticsearch.xpack.inference.rank.random.RandomRankRetrieverBuilder; import org.elasticsearch.xpack.inference.rank.textsimilarity.TextSimilarityRankRetrieverBuilder; import java.util.Set; +import static org.elasticsearch.xpack.inference.queries.SemanticMatchQueryRewriteInterceptor.SEMANTIC_MATCH_QUERY_REWRITE_INTERCEPTION_SUPPORTED; +import static org.elasticsearch.xpack.inference.queries.SemanticSparseVectorQueryRewriteInterceptor.SEMANTIC_SPARSE_VECTOR_QUERY_REWRITE_INTERCEPTION_SUPPORTED; + /** * Provides inference features. */ @@ -45,7 +47,8 @@ public Set<NodeFeature> getTestFeatures() { SemanticTextFieldMapper.SEMANTIC_TEXT_ZERO_SIZE_FIX, SemanticTextFieldMapper.SEMANTIC_TEXT_ALWAYS_EMIT_INFERENCE_ID_FIX, SEMANTIC_TEXT_HIGHLIGHTER, - SemanticMatchQueryRewriteInterceptor.SEMANTIC_MATCH_QUERY_REWRITE_INTERCEPTION_SUPPORTED + SEMANTIC_MATCH_QUERY_REWRITE_INTERCEPTION_SUPPORTED, + SEMANTIC_SPARSE_VECTOR_QUERY_REWRITE_INTERCEPTION_SUPPORTED ); } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java index 93743a5485c2c..72fa840ad19b0 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java @@ -28,7 +28,6 @@ import org.elasticsearch.indices.SystemIndexDescriptor; import org.elasticsearch.inference.InferenceServiceExtension; import org.elasticsearch.inference.InferenceServiceRegistry; -import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.node.PluginComponentBinding; import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.plugins.ExtensiblePlugin; @@ -46,7 +45,6 @@ import org.elasticsearch.threadpool.ScalingExecutorBuilder; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xpack.core.ClientHelper; -import org.elasticsearch.xpack.core.XPackPlugin; import org.elasticsearch.xpack.core.action.XPackUsageFeatureAction; import org.elasticsearch.xpack.core.inference.action.DeleteInferenceEndpointAction; import org.elasticsearch.xpack.core.inference.action.GetInferenceDiagnosticsAction; @@ -56,7 +54,6 @@ import org.elasticsearch.xpack.core.inference.action.PutInferenceModelAction; import org.elasticsearch.xpack.core.inference.action.UnifiedCompletionAction; import org.elasticsearch.xpack.core.inference.action.UpdateInferenceModelAction; -import org.elasticsearch.xpack.core.ssl.SSLService; import
org.elasticsearch.xpack.inference.action.TransportDeleteInferenceEndpointAction; import org.elasticsearch.xpack.inference.action.TransportGetInferenceDiagnosticsAction; import org.elasticsearch.xpack.inference.action.TransportGetInferenceModelAction; @@ -80,6 +77,7 @@ import org.elasticsearch.xpack.inference.mapper.SemanticTextFieldMapper; import org.elasticsearch.xpack.inference.queries.SemanticMatchQueryRewriteInterceptor; import org.elasticsearch.xpack.inference.queries.SemanticQueryBuilder; +import org.elasticsearch.xpack.inference.queries.SemanticSparseVectorQueryRewriteInterceptor; import org.elasticsearch.xpack.inference.rank.random.RandomRankBuilder; import org.elasticsearch.xpack.inference.rank.random.RandomRankRetrieverBuilder; import org.elasticsearch.xpack.inference.rank.textsimilarity.TextSimilarityRankBuilder; @@ -121,6 +119,7 @@ import java.util.Map; import java.util.function.Predicate; import java.util.function.Supplier; +import java.util.stream.Collectors; import java.util.stream.Stream; import static java.util.Collections.singletonList; @@ -154,7 +153,6 @@ public class InferencePlugin extends Plugin implements ActionPlugin, ExtensibleP private final Settings settings; private final SetOnce<HttpRequestSender.Factory> httpFactory = new SetOnce<>(); private final SetOnce<AmazonBedrockRequestSender.Factory> amazonBedrockFactory = new SetOnce<>(); - private final SetOnce<HttpRequestSender.Factory> elasicInferenceServiceFactory = new SetOnce<>(); private final SetOnce<ServiceComponents> serviceComponents = new SetOnce<>(); private final SetOnce<ElasticInferenceServiceComponents> elasticInferenceServiceComponents = new SetOnce<>(); private final SetOnce<InferenceServiceRegistry> inferenceServiceRegistry = new SetOnce<>(); @@ -237,31 +235,31 @@ public Collection<?> createComponents(PluginServices services) { var inferenceServices = new ArrayList<>(inferenceServiceExtensions); inferenceServices.add(this::getInferenceServiceFactories); - if (isElasticInferenceServiceEnabled()) { - // Create a separate instance of HTTPClientManager with its own SSL configuration (`xpack.inference.elastic.http.ssl.*`). - var elasticInferenceServiceHttpClientManager = HttpClientManager.create( - settings, - services.threadPool(), - services.clusterService(), - throttlerManager, - getSslService() - ); + // Set elasticInferenceUrl based on feature flags to support transitioning to the new Elastic Inference Service URL without exposing + // internal names like "eis" or "gateway". + ElasticInferenceServiceSettings inferenceServiceSettings = new ElasticInferenceServiceSettings(settings); + + String elasticInferenceUrl = null; - var elasticInferenceServiceRequestSenderFactory = new HttpRequestSender.Factory( - serviceComponents.get(), - elasticInferenceServiceHttpClientManager, - services.clusterService() + if (ELASTIC_INFERENCE_SERVICE_FEATURE_FLAG.isEnabled()) { + elasticInferenceUrl = inferenceServiceSettings.getElasticInferenceServiceUrl(); + } else if (DEPRECATED_ELASTIC_INFERENCE_SERVICE_FEATURE_FLAG.isEnabled()) { + log.warn( + "Deprecated flag {} detected for enabling {}.
Please use {}.", + ELASTIC_INFERENCE_SERVICE_IDENTIFIER, + DEPRECATED_ELASTIC_INFERENCE_SERVICE_FEATURE_FLAG, + ELASTIC_INFERENCE_SERVICE_FEATURE_FLAG ); - elasicInferenceServiceFactory.set(elasticInferenceServiceRequestSenderFactory); + elasticInferenceUrl = inferenceServiceSettings.getEisGatewayUrl(); + } - ElasticInferenceServiceSettings inferenceServiceSettings = new ElasticInferenceServiceSettings(settings); - String elasticInferenceUrl = this.getElasticInferenceServiceUrl(inferenceServiceSettings); + if (elasticInferenceUrl != null) { elasticInferenceServiceComponents.set(new ElasticInferenceServiceComponents(elasticInferenceUrl)); inferenceServices.add( () -> List.of( context -> new ElasticInferenceService( - elasicInferenceServiceFactory.get(), + httpFactory.get(), serviceComponents.get(), elasticInferenceServiceComponents.get() ) @@ -384,21 +382,16 @@ public static ExecutorBuilder inferenceUtilityExecutor(Settings settings) { @Override public List> getSettings() { - ArrayList> settings = new ArrayList<>(); - settings.addAll(HttpSettings.getSettingsDefinitions()); - settings.addAll(HttpClientManager.getSettingsDefinitions()); - settings.addAll(ThrottlerManager.getSettingsDefinitions()); - settings.addAll(RetrySettings.getSettingsDefinitions()); - settings.addAll(Truncator.getSettingsDefinitions()); - settings.addAll(RequestExecutorServiceSettings.getSettingsDefinitions()); - settings.add(SKIP_VALIDATE_AND_START); - - // Register Elastic Inference Service settings definitions if the corresponding feature flag is enabled. - if (isElasticInferenceServiceEnabled()) { - settings.addAll(ElasticInferenceServiceSettings.getSettingsDefinitions()); - } - - return settings; + return Stream.of( + HttpSettings.getSettingsDefinitions(), + HttpClientManager.getSettingsDefinitions(), + ThrottlerManager.getSettingsDefinitions(), + RetrySettings.getSettingsDefinitions(), + ElasticInferenceServiceSettings.getSettingsDefinitions(), + Truncator.getSettingsDefinitions(), + RequestExecutorServiceSettings.getSettingsDefinitions(), + List.of(SKIP_VALIDATE_AND_START) + ).flatMap(Collection::stream).collect(Collectors.toList()); } @Override @@ -440,16 +433,13 @@ public List> getQueries() { @Override public List getQueryRewriteInterceptors() { - return List.of(new SemanticMatchQueryRewriteInterceptor()); + return List.of(new SemanticMatchQueryRewriteInterceptor(), new SemanticSparseVectorQueryRewriteInterceptor()); } @Override public List> getRetrievers() { return List.of( - new RetrieverSpec<>( - new ParseField(TextSimilarityRankBuilder.NAME), - (parser, context) -> TextSimilarityRankRetrieverBuilder.fromXContent(parser, context, getLicenseState()) - ), + new RetrieverSpec<>(new ParseField(TextSimilarityRankBuilder.NAME), TextSimilarityRankRetrieverBuilder::fromXContent), new RetrieverSpec<>(new ParseField(RandomRankBuilder.NAME), RandomRankRetrieverBuilder::fromXContent) ); } @@ -458,36 +448,4 @@ public List> getRetrievers() { public Map getHighlighters() { return Map.of(SemanticTextHighlighter.NAME, new SemanticTextHighlighter()); } - - // Get Elastic Inference service URL based on feature flags to support transitioning - // to the new Elastic Inference Service URL. 
- private String getElasticInferenceServiceUrl(ElasticInferenceServiceSettings settings) { - String elasticInferenceUrl = null; - - if (ELASTIC_INFERENCE_SERVICE_FEATURE_FLAG.isEnabled()) { - elasticInferenceUrl = settings.getElasticInferenceServiceUrl(); - } else if (DEPRECATED_ELASTIC_INFERENCE_SERVICE_FEATURE_FLAG.isEnabled()) { - log.warn( - "Deprecated flag {} detected for enabling {}. Please use {}.", - ELASTIC_INFERENCE_SERVICE_IDENTIFIER, - DEPRECATED_ELASTIC_INFERENCE_SERVICE_FEATURE_FLAG, - ELASTIC_INFERENCE_SERVICE_FEATURE_FLAG - ); - elasticInferenceUrl = settings.getEisGatewayUrl(); - } - - return elasticInferenceUrl; - } - - protected Boolean isElasticInferenceServiceEnabled() { - return (ELASTIC_INFERENCE_SERVICE_FEATURE_FLAG.isEnabled() || DEPRECATED_ELASTIC_INFERENCE_SERVICE_FEATURE_FLAG.isEnabled()); - } - - protected SSLService getSslService() { - return XPackPlugin.getSharedSslService(); - } - - protected XPackLicenseState getLicenseState() { - return XPackPlugin.getSharedLicenseState(); - } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/HttpClientManager.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/HttpClientManager.java index 6d09c9e67b363..e5d76b9bb5570 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/HttpClientManager.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/HttpClientManager.java @@ -7,14 +7,9 @@ package org.elasticsearch.xpack.inference.external.http; -import org.apache.http.config.Registry; -import org.apache.http.config.RegistryBuilder; import org.apache.http.impl.nio.conn.PoolingNHttpClientConnectionManager; import org.apache.http.impl.nio.reactor.DefaultConnectingIOReactor; import org.apache.http.impl.nio.reactor.IOReactorConfig; -import org.apache.http.nio.conn.NoopIOSessionStrategy; -import org.apache.http.nio.conn.SchemeIOSessionStrategy; -import org.apache.http.nio.conn.ssl.SSLIOSessionStrategy; import org.apache.http.nio.reactor.ConnectingIOReactor; import org.apache.http.nio.reactor.IOReactorException; import org.apache.http.pool.PoolStats; @@ -26,7 +21,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.xpack.core.ssl.SSLService; import org.elasticsearch.xpack.inference.logging.ThrottlerManager; import java.io.Closeable; @@ -34,13 +28,11 @@ import java.util.List; import static org.elasticsearch.core.Strings.format; -import static org.elasticsearch.xpack.inference.services.elastic.ElasticInferenceServiceSettings.ELASTIC_INFERENCE_SERVICE_SSL_CONFIGURATION_PREFIX; public class HttpClientManager implements Closeable { private static final Logger logger = LogManager.getLogger(HttpClientManager.class); /** * The maximum number of total connections the connection pool can lease to all routes. - * The configuration applies to each instance of HTTPClientManager (max_total_connections=10 and instances=5 leads to 50 connections). * From googling around the connection pools maxTotal value should be close to the number of available threads. * * https://stackoverflow.com/questions/30989637/how-to-decide-optimal-settings-for-setmaxtotal-and-setdefaultmaxperroute @@ -55,7 +47,6 @@ public class HttpClientManager implements Closeable { /** * The max number of connections a single route can lease. 
- * This configuration applies to each instance of HttpClientManager. */ public static final Setting<Integer> MAX_ROUTE_CONNECTIONS = Setting.intSetting( "xpack.inference.http.max_route_connections", @@ -107,22 +98,6 @@ public static HttpClientManager create( return new HttpClientManager(settings, connectionManager, threadPool, clusterService, throttlerManager); } - public static HttpClientManager create( - Settings settings, - ThreadPool threadPool, - ClusterService clusterService, - ThrottlerManager throttlerManager, - SSLService sslService - ) { - // Set the sslStrategy to ensure an encrypted connection, as Elastic Inference Service requires it. - SSLIOSessionStrategy sslioSessionStrategy = sslService.sslIOSessionStrategy( - sslService.getSSLConfiguration(ELASTIC_INFERENCE_SERVICE_SSL_CONFIGURATION_PREFIX) - ); - - PoolingNHttpClientConnectionManager connectionManager = createConnectionManager(sslioSessionStrategy); - return new HttpClientManager(settings, connectionManager, threadPool, clusterService, throttlerManager); - } - // Default for testing HttpClientManager( Settings settings, @@ -146,25 +121,6 @@ public static HttpClientManager create( this.addSettingsUpdateConsumers(clusterService); } - private static PoolingNHttpClientConnectionManager createConnectionManager(SSLIOSessionStrategy sslStrategy) { - ConnectingIOReactor ioReactor; - try { - var configBuilder = IOReactorConfig.custom().setSoKeepAlive(true); - ioReactor = new DefaultConnectingIOReactor(configBuilder.build()); - } catch (IOReactorException e) { - var message = "Failed to initialize HTTP client manager with SSL."; - logger.error(message, e); - throw new ElasticsearchException(message, e); - } - - Registry<SchemeIOSessionStrategy> registry = RegistryBuilder.<SchemeIOSessionStrategy>create() - .register("http", NoopIOSessionStrategy.INSTANCE) - .register("https", sslStrategy) - .build(); - - return new PoolingNHttpClientConnectionManager(ioReactor, registry); - } - private static PoolingNHttpClientConnectionManager createConnectionManager() { ConnectingIOReactor ioReactor; try { diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/queries/SemanticMatchQueryRewriteInterceptor.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/queries/SemanticMatchQueryRewriteInterceptor.java index a4a8123935c3e..fd1d65d00faf5 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/queries/SemanticMatchQueryRewriteInterceptor.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/queries/SemanticMatchQueryRewriteInterceptor.java @@ -7,24 +7,12 @@ package org.elasticsearch.xpack.inference.queries; -import org.elasticsearch.action.ResolvedIndices; -import org.elasticsearch.cluster.metadata.IndexMetadata; -import org.elasticsearch.cluster.metadata.InferenceFieldMetadata; import org.elasticsearch.features.NodeFeature; -import org.elasticsearch.index.mapper.IndexFieldMapper; import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.MatchQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; -import org.elasticsearch.index.query.QueryRewriteContext; -import org.elasticsearch.index.query.TermQueryBuilder; -import org.elasticsearch.index.query.TermsQueryBuilder; -import org.elasticsearch.plugins.internal.rewriter.QueryRewriteInterceptor; -import java.util.ArrayList; -import java.util.Collection; -import java.util.List; - -public class SemanticMatchQueryRewriteInterceptor implements QueryRewriteInterceptor { +public class
SemanticMatchQueryRewriteInterceptor extends SemanticQueryRewriteInterceptor { public static final NodeFeature SEMANTIC_MATCH_QUERY_REWRITE_INTERCEPTION_SUPPORTED = new NodeFeature( "search.semantic_match_query_rewrite_interception_supported" @@ -33,63 +21,45 @@ public class SemanticMatchQueryRewriteInterce public SemanticMatchQueryRewriteInterceptor() {} @Override - public QueryBuilder interceptAndRewrite(QueryRewriteContext context, QueryBuilder queryBuilder) { + protected String getFieldName(QueryBuilder queryBuilder) { assert (queryBuilder instanceof MatchQueryBuilder); MatchQueryBuilder matchQueryBuilder = (MatchQueryBuilder) queryBuilder; - QueryBuilder rewritten = queryBuilder; - ResolvedIndices resolvedIndices = context.getResolvedIndices(); - if (resolvedIndices != null) { - Collection<IndexMetadata> indexMetadataCollection = resolvedIndices.getConcreteLocalIndicesMetadata().values(); - List<String> inferenceIndices = new ArrayList<>(); - List<String> nonInferenceIndices = new ArrayList<>(); - for (IndexMetadata indexMetadata : indexMetadataCollection) { - String indexName = indexMetadata.getIndex().getName(); - InferenceFieldMetadata inferenceFieldMetadata = indexMetadata.getInferenceFields().get(matchQueryBuilder.fieldName()); - if (inferenceFieldMetadata != null) { - inferenceIndices.add(indexName); - } else { - nonInferenceIndices.add(indexName); - } - } - - if (inferenceIndices.isEmpty()) { - return rewritten; - } else if (nonInferenceIndices.isEmpty() == false) { - BoolQueryBuilder boolQueryBuilder = new BoolQueryBuilder(); - for (String inferenceIndexName : inferenceIndices) { - // Add a separate clause for each semantic query, because they may be using different inference endpoints - // TODO - consolidate this to a single clause once the semantic query supports multiple inference endpoints - boolQueryBuilder.should( - createSemanticSubQuery(inferenceIndexName, matchQueryBuilder.fieldName(), (String) matchQueryBuilder.value()) - ); - } - boolQueryBuilder.should(createMatchSubQuery(nonInferenceIndices, matchQueryBuilder)); - rewritten = boolQueryBuilder; - } else { - rewritten = new SemanticQueryBuilder(matchQueryBuilder.fieldName(), (String) matchQueryBuilder.value(), false); - } - } - - return rewritten; + return matchQueryBuilder.fieldName(); + } + @Override + protected String getQuery(QueryBuilder queryBuilder) { + assert (queryBuilder instanceof MatchQueryBuilder); + MatchQueryBuilder matchQueryBuilder = (MatchQueryBuilder) queryBuilder; + return (String) matchQueryBuilder.value(); } @Override - public String getQueryName() { - return MatchQueryBuilder.NAME; + protected QueryBuilder buildInferenceQuery(QueryBuilder queryBuilder, InferenceIndexInformationForField indexInformation) { + return new SemanticQueryBuilder(indexInformation.fieldName(), getQuery(queryBuilder), false); } - private QueryBuilder createSemanticSubQuery(String indexName, String fieldName, String value) { + @Override + protected QueryBuilder buildCombinedInferenceAndNonInferenceQuery( + QueryBuilder queryBuilder, + InferenceIndexInformationForField indexInformation + ) { + assert (queryBuilder instanceof MatchQueryBuilder); + MatchQueryBuilder matchQueryBuilder = (MatchQueryBuilder) queryBuilder; BoolQueryBuilder boolQueryBuilder = new BoolQueryBuilder(); - boolQueryBuilder.must(new SemanticQueryBuilder(fieldName, value, true)); - boolQueryBuilder.filter(new TermQueryBuilder(IndexFieldMapper.NAME, indexName)); + boolQueryBuilder.should( + createSemanticSubQuery(
indexInformation.getInferenceIndices(), + matchQueryBuilder.fieldName(), + (String) matchQueryBuilder.value() + ) + ); + boolQueryBuilder.should(createSubQueryForIndices(indexInformation.nonInferenceIndices(), matchQueryBuilder)); return boolQueryBuilder; } - private QueryBuilder createMatchSubQuery(List<String> indices, MatchQueryBuilder matchQueryBuilder) { - BoolQueryBuilder boolQueryBuilder = new BoolQueryBuilder(); - boolQueryBuilder.must(matchQueryBuilder); - boolQueryBuilder.filter(new TermsQueryBuilder(IndexFieldMapper.NAME, indices)); - return boolQueryBuilder; + @Override + public String getQueryName() { + return MatchQueryBuilder.NAME; } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/queries/SemanticQueryBuilder.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/queries/SemanticQueryBuilder.java index 2a34651efcd9d..dd0f6fe59ab23 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/queries/SemanticQueryBuilder.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/queries/SemanticQueryBuilder.java @@ -148,6 +148,14 @@ public String getWriteableName() { return NAME; } + public String getFieldName() { + return fieldName; + } + + public String getQuery() { + return query; + } + @Override public TransportVersion getMinimalSupportedVersion() { return TransportVersions.V_8_15_0; diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/queries/SemanticQueryRewriteInterceptor.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/queries/SemanticQueryRewriteInterceptor.java new file mode 100644 index 0000000000000..bb76ef0be24e9 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/queries/SemanticQueryRewriteInterceptor.java @@ -0,0 +1,148 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.queries; + +import org.elasticsearch.action.ResolvedIndices; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.InferenceFieldMetadata; +import org.elasticsearch.index.mapper.IndexFieldMapper; +import org.elasticsearch.index.query.BoolQueryBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryRewriteContext; +import org.elasticsearch.index.query.TermsQueryBuilder; +import org.elasticsearch.plugins.internal.rewriter.QueryRewriteInterceptor; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +/** + * Intercepts and adapts a query to be rewritten to work seamlessly on a semantic_text field. + */ +public abstract class SemanticQueryRewriteInterceptor implements QueryRewriteInterceptor { + + public SemanticQueryRewriteInterceptor() {} + + @Override + public QueryBuilder interceptAndRewrite(QueryRewriteContext context, QueryBuilder queryBuilder) { + String fieldName = getFieldName(queryBuilder); + ResolvedIndices resolvedIndices = context.getResolvedIndices(); + + if (resolvedIndices == null) { + // No resolved indices, so return the original query.
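+ // Without index metadata we cannot tell whether the field is mapped as semantic_text.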
+ return queryBuilder; + } + + InferenceIndexInformationForField indexInformation = resolveIndicesForField(fieldName, resolvedIndices); + if (indexInformation.getInferenceIndices().isEmpty()) { + // No inference fields were identified, so return the original query. + return queryBuilder; + } else if (indexInformation.nonInferenceIndices().isEmpty() == false) { + // Combined case where the field name requested by this query contains both + // semantic_text and non-inference fields, so we have to combine queries per index + // containing each field type. + return buildCombinedInferenceAndNonInferenceQuery(queryBuilder, indexInformation); + } else { + // The only fields we've identified are inference fields (e.g. semantic_text), + // so rewrite the entire query to work on a semantic_text field. + return buildInferenceQuery(queryBuilder, indexInformation); + } + } + + /** + * @param queryBuilder {@link QueryBuilder} + * @return The singular field name requested by the provided query builder. + */ + protected abstract String getFieldName(QueryBuilder queryBuilder); + + /** + * @param queryBuilder {@link QueryBuilder} + * @return The text/query string requested by the provided query builder. + */ + protected abstract String getQuery(QueryBuilder queryBuilder); + + /** + * Builds the inference query + * + * @param queryBuilder {@link QueryBuilder} + * @param indexInformation {@link InferenceIndexInformationForField} + * @return {@link QueryBuilder} + */ + protected abstract QueryBuilder buildInferenceQuery(QueryBuilder queryBuilder, InferenceIndexInformationForField indexInformation); + + /** + * Builds a combined inference and non-inference query, + * which separates the different queries into appropriate indices based on field type. + * @param queryBuilder {@link QueryBuilder} + * @param indexInformation {@link InferenceIndexInformationForField} + * @return {@link QueryBuilder} + */ + protected abstract QueryBuilder buildCombinedInferenceAndNonInferenceQuery( + QueryBuilder queryBuilder, + InferenceIndexInformationForField indexInformation + ); + + private InferenceIndexInformationForField resolveIndicesForField(String fieldName, ResolvedIndices resolvedIndices) { + Collection<IndexMetadata> indexMetadataCollection = resolvedIndices.getConcreteLocalIndicesMetadata().values(); + Map<String, InferenceFieldMetadata> inferenceIndicesMetadata = new HashMap<>(); + List<String> nonInferenceIndices = new ArrayList<>(); + for (IndexMetadata indexMetadata : indexMetadataCollection) { + String indexName = indexMetadata.getIndex().getName(); + InferenceFieldMetadata inferenceFieldMetadata = indexMetadata.getInferenceFields().get(fieldName); + if (inferenceFieldMetadata != null) { + inferenceIndicesMetadata.put(indexName, inferenceFieldMetadata); + } else { + nonInferenceIndices.add(indexName); + } + } + + return new InferenceIndexInformationForField(fieldName, inferenceIndicesMetadata, nonInferenceIndices); + } + + protected QueryBuilder createSubQueryForIndices(Collection<String> indices, QueryBuilder queryBuilder) { + BoolQueryBuilder boolQueryBuilder = new BoolQueryBuilder(); + boolQueryBuilder.must(queryBuilder); + boolQueryBuilder.filter(new TermsQueryBuilder(IndexFieldMapper.NAME, indices)); + return boolQueryBuilder; + } + + protected QueryBuilder createSemanticSubQuery(Collection<String> indices, String fieldName, String value) { + BoolQueryBuilder boolQueryBuilder = new BoolQueryBuilder(); + boolQueryBuilder.must(new SemanticQueryBuilder(fieldName, value, true)); + boolQueryBuilder.filter(new TermsQueryBuilder(IndexFieldMapper.NAME, indices)); + return
boolQueryBuilder; + } + + /** + * Represents the indices and associated inference information for a field. + */ + public record InferenceIndexInformationForField( + String fieldName, + Map<String, InferenceFieldMetadata> inferenceIndicesMetadata, + List<String> nonInferenceIndices + ) { + + public Collection<String> getInferenceIndices() { + return inferenceIndicesMetadata.keySet(); + } + + public Map<String, List<String>> getInferenceIdsIndices() { + return inferenceIndicesMetadata.entrySet() + .stream() + .collect( + Collectors.groupingBy( + entry -> entry.getValue().getSearchInferenceId(), + Collectors.mapping(Map.Entry::getKey, Collectors.toList()) + ) + ); + } + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/queries/SemanticSparseVectorQueryRewriteInterceptor.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/queries/SemanticSparseVectorQueryRewriteInterceptor.java new file mode 100644 index 0000000000000..a35e83450c55a --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/queries/SemanticSparseVectorQueryRewriteInterceptor.java @@ -0,0 +1,124 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.queries; + +import org.apache.lucene.search.join.ScoreMode; +import org.elasticsearch.features.NodeFeature; +import org.elasticsearch.index.query.BoolQueryBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.xpack.core.ml.search.SparseVectorQueryBuilder; +import org.elasticsearch.xpack.inference.mapper.SemanticTextField; + +import java.util.List; +import java.util.Map; + +public class SemanticSparseVectorQueryRewriteInterceptor extends SemanticQueryRewriteInterceptor { + + public static final NodeFeature SEMANTIC_SPARSE_VECTOR_QUERY_REWRITE_INTERCEPTION_SUPPORTED = new NodeFeature( + "search.semantic_sparse_vector_query_rewrite_interception_supported" + ); + + public SemanticSparseVectorQueryRewriteInterceptor() {} + + @Override + protected String getFieldName(QueryBuilder queryBuilder) { + assert (queryBuilder instanceof SparseVectorQueryBuilder); + SparseVectorQueryBuilder sparseVectorQueryBuilder = (SparseVectorQueryBuilder) queryBuilder; + return sparseVectorQueryBuilder.getFieldName(); + } + + @Override + protected String getQuery(QueryBuilder queryBuilder) { + assert (queryBuilder instanceof SparseVectorQueryBuilder); + SparseVectorQueryBuilder sparseVectorQueryBuilder = (SparseVectorQueryBuilder) queryBuilder; + return sparseVectorQueryBuilder.getQuery(); + } + + @Override + protected QueryBuilder buildInferenceQuery(QueryBuilder queryBuilder, InferenceIndexInformationForField indexInformation) { + Map<String, List<String>> inferenceIdsIndices = indexInformation.getInferenceIdsIndices(); + if (inferenceIdsIndices.size() == 1) { + // Simple case, everything uses the same inference ID + String searchInferenceId = inferenceIdsIndices.keySet().iterator().next(); + return buildNestedQueryFromSparseVectorQuery(queryBuilder, searchInferenceId); + } else { + // Multiple inference IDs, construct a boolean query + return buildInferenceQueryWithMultipleInferenceIds(queryBuilder, inferenceIdsIndices); + } + } + + private QueryBuilder buildInferenceQueryWithMultipleInferenceIds( + QueryBuilder queryBuilder, + Map<String, List<String>> inferenceIdsIndices + ) { +
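// One should clause per inference ID: each nested semantic_text query is filtered to the indices that use that ID. +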
BoolQueryBuilder boolQueryBuilder = new BoolQueryBuilder(); + for (String inferenceId : inferenceIdsIndices.keySet()) { + boolQueryBuilder.should( + createSubQueryForIndices( + inferenceIdsIndices.get(inferenceId), + buildNestedQueryFromSparseVectorQuery(queryBuilder, inferenceId) + ) + ); + } + return boolQueryBuilder; + } + + @Override + protected QueryBuilder buildCombinedInferenceAndNonInferenceQuery( + QueryBuilder queryBuilder, + InferenceIndexInformationForField indexInformation + ) { + assert (queryBuilder instanceof SparseVectorQueryBuilder); + SparseVectorQueryBuilder sparseVectorQueryBuilder = (SparseVectorQueryBuilder) queryBuilder; + Map<String, List<String>> inferenceIdsIndices = indexInformation.getInferenceIdsIndices(); + + BoolQueryBuilder boolQueryBuilder = new BoolQueryBuilder(); + boolQueryBuilder.should(createSubQueryForIndices(indexInformation.nonInferenceIndices(), sparseVectorQueryBuilder)); + // We always perform nested subqueries on semantic_text fields, to support + // sparse_vector queries using query vectors. + for (String inferenceId : inferenceIdsIndices.keySet()) { + boolQueryBuilder.should( + createSubQueryForIndices( + inferenceIdsIndices.get(inferenceId), + buildNestedQueryFromSparseVectorQuery(sparseVectorQueryBuilder, inferenceId) + ) + ); + } + return boolQueryBuilder; + } + + private QueryBuilder buildNestedQueryFromSparseVectorQuery(QueryBuilder queryBuilder, String searchInferenceId) { + assert (queryBuilder instanceof SparseVectorQueryBuilder); + SparseVectorQueryBuilder sparseVectorQueryBuilder = (SparseVectorQueryBuilder) queryBuilder; + return QueryBuilders.nestedQuery( + SemanticTextField.getChunksFieldName(sparseVectorQueryBuilder.getFieldName()), + new SparseVectorQueryBuilder( + SemanticTextField.getEmbeddingsFieldName(sparseVectorQueryBuilder.getFieldName()), + sparseVectorQueryBuilder.getQueryVectors(), + (sparseVectorQueryBuilder.getInferenceId() == null && sparseVectorQueryBuilder.getQuery() != null) + ?
searchInferenceId + : sparseVectorQueryBuilder.getInferenceId(), + sparseVectorQueryBuilder.getQuery(), + sparseVectorQueryBuilder.shouldPruneTokens(), + sparseVectorQueryBuilder.getTokenPruningConfig() + ), + ScoreMode.Max + ); + } + + @Override + public String getQueryName() { + return SparseVectorQueryBuilder.NAME; + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverBuilder.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverBuilder.java index f54696895a818..fd2427dc8ac6a 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverBuilder.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverBuilder.java @@ -12,7 +12,6 @@ import org.elasticsearch.features.NodeFeature; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.license.LicenseUtils; -import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.rank.RankDoc; import org.elasticsearch.search.retriever.CompoundRetrieverBuilder; @@ -22,6 +21,7 @@ import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xpack.core.XPackPlugin; import java.io.IOException; import java.util.List; @@ -73,11 +73,8 @@ public class TextSimilarityRankRetrieverBuilder extends CompoundRetrieverBuilder RetrieverBuilder.declareBaseParserFields(TextSimilarityRankBuilder.NAME, PARSER); } - public static TextSimilarityRankRetrieverBuilder fromXContent( - XContentParser parser, - RetrieverParserContext context, - XPackLicenseState licenceState - ) throws IOException { + public static TextSimilarityRankRetrieverBuilder fromXContent(XContentParser parser, RetrieverParserContext context) + throws IOException { if (context.clusterSupportsFeature(TEXT_SIMILARITY_RERANKER_RETRIEVER_SUPPORTED) == false) { throw new ParsingException(parser.getTokenLocation(), "unknown retriever [" + TextSimilarityRankBuilder.NAME + "]"); } @@ -86,7 +83,7 @@ public static TextSimilarityRankRetrieverBuilder fromXContent( "[text_similarity_reranker] retriever composition feature is not supported by all nodes in the cluster" ); } - if (TextSimilarityRankBuilder.TEXT_SIMILARITY_RERANKER_FEATURE.check(licenceState) == false) { + if (TextSimilarityRankBuilder.TEXT_SIMILARITY_RERANKER_FEATURE.check(XPackPlugin.getSharedLicenseState()) == false) { throw LicenseUtils.newComplianceException(TextSimilarityRankBuilder.NAME); } return PARSER.apply(parser, context); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rest/ServerSentEventsRestActionListener.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rest/ServerSentEventsRestActionListener.java index 3177474ea8ca6..bf94f072b6e04 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rest/ServerSentEventsRestActionListener.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rest/ServerSentEventsRestActionListener.java @@ -43,6 +43,7 @@ import static org.elasticsearch.ElasticsearchException.REST_EXCEPTION_SKIP_CAUSE; import static org.elasticsearch.ElasticsearchException.REST_EXCEPTION_SKIP_STACK_TRACE; 
+import static org.elasticsearch.rest.RestController.ERROR_TRACE_DEFAULT; /** * A version of {@link org.elasticsearch.rest.action.RestChunkedToXContentListener} that reads from a {@link Flow.Publisher} and encodes @@ -161,7 +162,7 @@ private ChunkedToXContent errorChunk(Throwable t) { } var errorParams = p; - if (errorParams.paramAsBoolean("error_trace", false) && status != RestStatus.UNAUTHORIZED) { + if (errorParams.paramAsBoolean("error_trace", ERROR_TRACE_DEFAULT) && status != RestStatus.UNAUTHORIZED) { errorParams = new ToXContent.DelegatingMapParams( Map.of(REST_EXCEPTION_SKIP_STACK_TRACE, "false", REST_EXCEPTION_SKIP_CAUSE, "true"), params diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elastic/ElasticInferenceServiceSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elastic/ElasticInferenceServiceSettings.java index 431a3647e2879..bc2daddc2a346 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elastic/ElasticInferenceServiceSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elastic/ElasticInferenceServiceSettings.java @@ -9,9 +9,7 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.xpack.core.ssl.SSLConfigurationSettings; -import java.util.ArrayList; import java.util.List; public class ElasticInferenceServiceSettings { @@ -19,8 +17,6 @@ public class ElasticInferenceServiceSettings { @Deprecated static final Setting<String> EIS_GATEWAY_URL = Setting.simpleString("xpack.inference.eis.gateway.url", Setting.Property.NodeScope); - public static final String ELASTIC_INFERENCE_SERVICE_SSL_CONFIGURATION_PREFIX = "xpack.inference.elastic.http.ssl."; - static final Setting<String> ELASTIC_INFERENCE_SERVICE_URL = Setting.simpleString( "xpack.inference.elastic.url", Setting.Property.NodeScope @@ -35,27 +31,11 @@ public class ElasticInferenceServiceSettings { public ElasticInferenceServiceSettings(Settings settings) { eisGatewayUrl = EIS_GATEWAY_URL.get(settings); elasticInferenceServiceUrl = ELASTIC_INFERENCE_SERVICE_URL.get(settings); - } - - public static final SSLConfigurationSettings ELASTIC_INFERENCE_SERVICE_SSL_CONFIGURATION_SETTINGS = SSLConfigurationSettings.withPrefix( - ELASTIC_INFERENCE_SERVICE_SSL_CONFIGURATION_PREFIX, - false - ); - public static final Setting<Boolean> ELASTIC_INFERENCE_SERVICE_SSL_ENABLED = Setting.boolSetting( - ELASTIC_INFERENCE_SERVICE_SSL_CONFIGURATION_PREFIX + "enabled", - true, - Setting.Property.NodeScope - ); + } public static List<Setting<?>> getSettingsDefinitions() { - ArrayList<Setting<?>> settings = new ArrayList<>(); - settings.add(EIS_GATEWAY_URL); - settings.add(ELASTIC_INFERENCE_SERVICE_URL); - settings.add(ELASTIC_INFERENCE_SERVICE_SSL_ENABLED); - settings.addAll(ELASTIC_INFERENCE_SERVICE_SSL_CONFIGURATION_SETTINGS.getEnabledSettings()); - - return settings; + return List.of(EIS_GATEWAY_URL, ELASTIC_INFERENCE_SERVICE_URL); } @Deprecated diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/cluster/metadata/SemanticTextClusterMetadataTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/cluster/metadata/SemanticTextClusterMetadataTests.java index 61033a0211065..bfec2d5ac3484 100644 ---
b/x-pack/plugin/inference/src/test/java/org/elasticsearch/cluster/metadata/SemanticTextClusterMetadataTests.java @@ -14,7 +14,6 @@ import org.elasticsearch.index.IndexService; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; -import org.elasticsearch.xpack.core.XPackPlugin; import org.elasticsearch.xpack.inference.InferencePlugin; import org.hamcrest.Matchers; @@ -29,7 +28,7 @@ public class SemanticTextClusterMetadataTests extends ESSingleNodeTestCase { @Override protected Collection> getPlugins() { - return List.of(XPackPlugin.class, InferencePlugin.class); + return List.of(InferencePlugin.class); } public void testCreateIndexWithSemanticTextField() { diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/index/query/SemanticMatchQueryRewriteInterceptorTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/index/query/SemanticMatchQueryRewriteInterceptorTests.java new file mode 100644 index 0000000000000..47705c14d5941 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/index/query/SemanticMatchQueryRewriteInterceptorTests.java @@ -0,0 +1,110 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.index.query; + +import org.elasticsearch.action.MockResolvedIndices; +import org.elasticsearch.action.OriginalIndices; +import org.elasticsearch.action.ResolvedIndices; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.InferenceFieldMetadata; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.plugins.internal.rewriter.QueryRewriteInterceptor; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.client.NoOpClient; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.xpack.inference.queries.SemanticMatchQueryRewriteInterceptor; +import org.elasticsearch.xpack.inference.queries.SemanticQueryBuilder; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.util.Map; + +public class SemanticMatchQueryRewriteInterceptorTests extends ESTestCase { + + private TestThreadPool threadPool; + private NoOpClient client; + private Index index; + + private static final String FIELD_NAME = "fieldName"; + private static final String VALUE = "value"; + + @Before + public void setup() { + threadPool = createThreadPool(); + client = new NoOpClient(threadPool); + index = new Index(randomAlphaOfLength(10), randomAlphaOfLength(10)); + } + + @After + public void cleanup() { + threadPool.close(); + } + + public void testMatchQueryOnInferenceFieldIsInterceptedAndRewrittenToSemanticQuery() throws IOException { + Map inferenceFields = Map.of( + FIELD_NAME, + new InferenceFieldMetadata(index.getName(), "inferenceId", new String[] { FIELD_NAME }) + ); + QueryRewriteContext context = createQueryRewriteContext(inferenceFields); + QueryBuilder original = createTestQueryBuilder(); + QueryBuilder rewritten = original.rewrite(context); + assertTrue( + "Expected query to be intercepted, but was [" + rewritten.getClass().getName() + "]", + rewritten instanceof InterceptedQueryBuilderWrapper + ); + 
InterceptedQueryBuilderWrapper intercepted = (InterceptedQueryBuilderWrapper) rewritten; + assertTrue(intercepted.queryBuilder instanceof SemanticQueryBuilder); + SemanticQueryBuilder semanticQueryBuilder = (SemanticQueryBuilder) intercepted.queryBuilder; + assertEquals(FIELD_NAME, semanticQueryBuilder.getFieldName()); + assertEquals(VALUE, semanticQueryBuilder.getQuery()); + } + + public void testMatchQueryOnNonInferenceFieldRemainsMatchQuery() throws IOException { + QueryRewriteContext context = createQueryRewriteContext(Map.of()); // No inference fields + QueryBuilder original = createTestQueryBuilder(); + QueryBuilder rewritten = original.rewrite(context); + assertTrue( + "Expected query to remain match but was [" + rewritten.getClass().getName() + "]", + rewritten instanceof MatchQueryBuilder + ); + assertEquals(original, rewritten); + } + + private MatchQueryBuilder createTestQueryBuilder() { + return new MatchQueryBuilder(FIELD_NAME, VALUE); + } + + private QueryRewriteContext createQueryRewriteContext(Map inferenceFields) { + IndexMetadata indexMetadata = IndexMetadata.builder(index.getName()) + .settings( + Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) + .put(IndexMetadata.SETTING_INDEX_UUID, index.getUUID()) + ) + .numberOfShards(1) + .numberOfReplicas(0) + .putInferenceFields(inferenceFields) + .build(); + + ResolvedIndices resolvedIndices = new MockResolvedIndices( + Map.of(), + new OriginalIndices(new String[] { index.getName() }, IndicesOptions.DEFAULT), + Map.of(index, indexMetadata) + ); + + return new QueryRewriteContext(null, client, null, resolvedIndices, null, createRewriteInterceptor()); + } + + private QueryRewriteInterceptor createRewriteInterceptor() { + return new SemanticMatchQueryRewriteInterceptor(); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/index/query/SemanticSparseVectorQueryRewriteInterceptorTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/index/query/SemanticSparseVectorQueryRewriteInterceptorTests.java new file mode 100644 index 0000000000000..1adad1df7b29b --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/index/query/SemanticSparseVectorQueryRewriteInterceptorTests.java @@ -0,0 +1,137 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.index.query; + +import org.elasticsearch.action.MockResolvedIndices; +import org.elasticsearch.action.OriginalIndices; +import org.elasticsearch.action.ResolvedIndices; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.InferenceFieldMetadata; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.plugins.internal.rewriter.QueryRewriteInterceptor; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.client.NoOpClient; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.xpack.core.ml.search.SparseVectorQueryBuilder; +import org.elasticsearch.xpack.inference.mapper.SemanticTextField; +import org.elasticsearch.xpack.inference.queries.SemanticSparseVectorQueryRewriteInterceptor; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.util.Map; + +public class SemanticSparseVectorQueryRewriteInterceptorTests extends ESTestCase { + + private TestThreadPool threadPool; + private NoOpClient client; + private Index index; + + private static final String FIELD_NAME = "fieldName"; + private static final String INFERENCE_ID = "inferenceId"; + private static final String QUERY = "query"; + + @Before + public void setup() { + threadPool = createThreadPool(); + client = new NoOpClient(threadPool); + index = new Index(randomAlphaOfLength(10), randomAlphaOfLength(10)); + } + + @After + public void cleanup() { + threadPool.close(); + } + + public void testSparseVectorQueryOnInferenceFieldIsInterceptedAndRewritten() throws IOException { + Map inferenceFields = Map.of( + FIELD_NAME, + new InferenceFieldMetadata(index.getName(), "inferenceId", new String[] { FIELD_NAME }) + ); + QueryRewriteContext context = createQueryRewriteContext(inferenceFields); + QueryBuilder original = new SparseVectorQueryBuilder(FIELD_NAME, INFERENCE_ID, QUERY); + QueryBuilder rewritten = original.rewrite(context); + assertTrue( + "Expected query to be intercepted, but was [" + rewritten.getClass().getName() + "]", + rewritten instanceof InterceptedQueryBuilderWrapper + ); + InterceptedQueryBuilderWrapper intercepted = (InterceptedQueryBuilderWrapper) rewritten; + assertTrue(intercepted.queryBuilder instanceof NestedQueryBuilder); + NestedQueryBuilder nestedQueryBuilder = (NestedQueryBuilder) intercepted.queryBuilder; + assertEquals(SemanticTextField.getChunksFieldName(FIELD_NAME), nestedQueryBuilder.path()); + QueryBuilder innerQuery = nestedQueryBuilder.query(); + assertTrue(innerQuery instanceof SparseVectorQueryBuilder); + SparseVectorQueryBuilder sparseVectorQueryBuilder = (SparseVectorQueryBuilder) innerQuery; + assertEquals(SemanticTextField.getEmbeddingsFieldName(FIELD_NAME), sparseVectorQueryBuilder.getFieldName()); + assertEquals(INFERENCE_ID, sparseVectorQueryBuilder.getInferenceId()); + assertEquals(QUERY, sparseVectorQueryBuilder.getQuery()); + } + + public void testSparseVectorQueryOnInferenceFieldWithoutInferenceIdIsInterceptedAndRewritten() throws IOException { + Map inferenceFields = Map.of( + FIELD_NAME, + new InferenceFieldMetadata(index.getName(), "inferenceId", new String[] { FIELD_NAME }) + ); + QueryRewriteContext context = createQueryRewriteContext(inferenceFields); + QueryBuilder original = new SparseVectorQueryBuilder(FIELD_NAME, null, QUERY); + QueryBuilder rewritten = 
original.rewrite(context); + assertTrue( + "Expected query to be intercepted, but was [" + rewritten.getClass().getName() + "]", + rewritten instanceof InterceptedQueryBuilderWrapper + ); + InterceptedQueryBuilderWrapper intercepted = (InterceptedQueryBuilderWrapper) rewritten; + assertTrue(intercepted.queryBuilder instanceof NestedQueryBuilder); + NestedQueryBuilder nestedQueryBuilder = (NestedQueryBuilder) intercepted.queryBuilder; + assertEquals(SemanticTextField.getChunksFieldName(FIELD_NAME), nestedQueryBuilder.path()); + QueryBuilder innerQuery = nestedQueryBuilder.query(); + assertTrue(innerQuery instanceof SparseVectorQueryBuilder); + SparseVectorQueryBuilder sparseVectorQueryBuilder = (SparseVectorQueryBuilder) innerQuery; + assertEquals(SemanticTextField.getEmbeddingsFieldName(FIELD_NAME), sparseVectorQueryBuilder.getFieldName()); + assertEquals(INFERENCE_ID, sparseVectorQueryBuilder.getInferenceId()); + assertEquals(QUERY, sparseVectorQueryBuilder.getQuery()); + } + + public void testSparseVectorQueryOnNonInferenceFieldRemainsUnchanged() throws IOException { + QueryRewriteContext context = createQueryRewriteContext(Map.of()); // No inference fields + QueryBuilder original = new SparseVectorQueryBuilder(FIELD_NAME, INFERENCE_ID, QUERY); + QueryBuilder rewritten = original.rewrite(context); + assertTrue( + "Expected query to remain sparse_vector but was [" + rewritten.getClass().getName() + "]", + rewritten instanceof SparseVectorQueryBuilder + ); + assertEquals(original, rewritten); + } + + private QueryRewriteContext createQueryRewriteContext(Map inferenceFields) { + IndexMetadata indexMetadata = IndexMetadata.builder(index.getName()) + .settings( + Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) + .put(IndexMetadata.SETTING_INDEX_UUID, index.getUUID()) + ) + .numberOfShards(1) + .numberOfReplicas(0) + .putInferenceFields(inferenceFields) + .build(); + + ResolvedIndices resolvedIndices = new MockResolvedIndices( + Map.of(), + new OriginalIndices(new String[] { index.getName() }, IndicesOptions.DEFAULT), + Map.of(index, indexMetadata) + ); + + return new QueryRewriteContext(null, client, null, resolvedIndices, null, createRewriteInterceptor()); + } + + private QueryRewriteInterceptor createRewriteInterceptor() { + return new SemanticSparseVectorQueryRewriteInterceptor(); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/InferencePluginTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/InferencePluginTests.java deleted file mode 100644 index d1db5b8b12cc6..0000000000000 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/InferencePluginTests.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
- */ - -package org.elasticsearch.xpack.inference; - -import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xpack.inference.services.elastic.ElasticInferenceServiceSettings; -import org.junit.After; -import org.junit.Before; - -import static org.hamcrest.Matchers.is; - -public class InferencePluginTests extends ESTestCase { - private InferencePlugin inferencePlugin; - - private Boolean elasticInferenceServiceEnabled = true; - - private void setElasticInferenceServiceEnabled(Boolean elasticInferenceServiceEnabled) { - this.elasticInferenceServiceEnabled = elasticInferenceServiceEnabled; - } - - @Before - public void setUp() throws Exception { - super.setUp(); - - Settings settings = Settings.builder().build(); - inferencePlugin = new InferencePlugin(settings) { - @Override - protected Boolean isElasticInferenceServiceEnabled() { - return elasticInferenceServiceEnabled; - } - }; - } - - @After - public void tearDown() throws Exception { - super.tearDown(); - } - - public void testElasticInferenceServiceSettingsPresent() throws Exception { - setElasticInferenceServiceEnabled(true); // enable elastic inference service - boolean anyMatch = inferencePlugin.getSettings() - .stream() - .map(Setting::getKey) - .anyMatch(key -> key.startsWith(ElasticInferenceServiceSettings.ELASTIC_INFERENCE_SERVICE_SSL_CONFIGURATION_PREFIX)); - - assertThat("xpack.inference.elastic settings are present", anyMatch, is(true)); - } - - public void testElasticInferenceServiceSettingsNotPresent() throws Exception { - setElasticInferenceServiceEnabled(false); // disable elastic inference service - boolean noneMatch = inferencePlugin.getSettings() - .stream() - .map(Setting::getKey) - .noneMatch(key -> key.startsWith(ElasticInferenceServiceSettings.ELASTIC_INFERENCE_SERVICE_SSL_CONFIGURATION_PREFIX)); - - assertThat("xpack.inference.elastic settings are not present", noneMatch, is(true)); - } -} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/LocalStateInferencePlugin.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/LocalStateInferencePlugin.java deleted file mode 100644 index 68ea175bd9870..0000000000000 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/LocalStateInferencePlugin.java +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
- */ - -package org.elasticsearch.xpack.inference; - -import org.elasticsearch.action.support.MappedActionFilter; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.mapper.Mapper; -import org.elasticsearch.inference.InferenceServiceExtension; -import org.elasticsearch.license.XPackLicenseState; -import org.elasticsearch.plugins.SearchPlugin; -import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; -import org.elasticsearch.xpack.core.ssl.SSLService; -import org.elasticsearch.xpack.inference.mock.TestDenseInferenceServiceExtension; -import org.elasticsearch.xpack.inference.mock.TestSparseInferenceServiceExtension; - -import java.nio.file.Path; -import java.util.Collection; -import java.util.List; -import java.util.Map; - -import static java.util.stream.Collectors.toList; - -public class LocalStateInferencePlugin extends LocalStateCompositeXPackPlugin { - private final InferencePlugin inferencePlugin; - - public LocalStateInferencePlugin(final Settings settings, final Path configPath) throws Exception { - super(settings, configPath); - LocalStateInferencePlugin thisVar = this; - this.inferencePlugin = new InferencePlugin(settings) { - @Override - protected SSLService getSslService() { - return thisVar.getSslService(); - } - - @Override - protected XPackLicenseState getLicenseState() { - return thisVar.getLicenseState(); - } - - @Override - public List getInferenceServiceFactories() { - return List.of( - TestSparseInferenceServiceExtension.TestInferenceService::new, - TestDenseInferenceServiceExtension.TestInferenceService::new - ); - } - }; - plugins.add(inferencePlugin); - } - - @Override - public List> getRetrievers() { - return this.filterPlugins(SearchPlugin.class).stream().flatMap(p -> p.getRetrievers().stream()).collect(toList()); - } - - @Override - public Map getMappers() { - return inferencePlugin.getMappers(); - } - - @Override - public Collection getMappedActionFilters() { - return inferencePlugin.getMappedActionFilters(); - } - -} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/Utils.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/Utils.java index 0f322e64755be..9395ae222e9ba 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/Utils.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/Utils.java @@ -14,6 +14,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper; +import org.elasticsearch.inference.InferenceServiceExtension; import org.elasticsearch.inference.Model; import org.elasticsearch.inference.ModelConfigurations; import org.elasticsearch.inference.ModelSecrets; @@ -142,6 +143,20 @@ private static void blockingCall( latch.await(); } + public static class TestInferencePlugin extends InferencePlugin { + public TestInferencePlugin(Settings settings) { + super(settings); + } + + @Override + public List getInferenceServiceFactories() { + return List.of( + TestSparseInferenceServiceExtension.TestInferenceService::new, + TestDenseInferenceServiceExtension.TestInferenceService::new + ); + } + } + public static Model getInvalidModel(String inferenceEntityId, String serviceName) { var mockConfigs = mock(ModelConfigurations.class); when(mockConfigs.getInferenceEntityId()).thenReturn(inferenceEntityId); diff --git 
a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/SemanticTextNonDynamicFieldMapperTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/SemanticTextNonDynamicFieldMapperTests.java index 24183b21f73e7..1f58c4165056d 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/SemanticTextNonDynamicFieldMapperTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/SemanticTextNonDynamicFieldMapperTests.java @@ -9,7 +9,6 @@ import org.elasticsearch.index.mapper.NonDynamicFieldMapperTests; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.xpack.inference.LocalStateInferencePlugin; import org.elasticsearch.xpack.inference.Utils; import org.elasticsearch.xpack.inference.mock.TestSparseInferenceServiceExtension; import org.junit.Before; @@ -27,7 +26,7 @@ public void setup() throws Exception { @Override protected Collection> getPlugins() { - return List.of(LocalStateInferencePlugin.class); + return List.of(Utils.TestInferencePlugin.class); } @Override diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankMultiNodeTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankMultiNodeTests.java index daed03c198e0d..6d6403b69ea11 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankMultiNodeTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankMultiNodeTests.java @@ -10,7 +10,7 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.search.rank.RankBuilder; import org.elasticsearch.search.rank.rerank.AbstractRerankerIT; -import org.elasticsearch.xpack.inference.LocalStateInferencePlugin; +import org.elasticsearch.xpack.inference.InferencePlugin; import java.util.Collection; import java.util.List; @@ -40,7 +40,7 @@ protected RankBuilder getThrowingRankBuilder(int rankWindowSize, String rankFeat @Override protected Collection> pluginsNeeded() { - return List.of(LocalStateInferencePlugin.class, TextSimilarityTestPlugin.class); + return List.of(InferencePlugin.class, TextSimilarityTestPlugin.class); } public void testQueryPhaseShardThrowingAllShardsFail() throws Exception { diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverTelemetryTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverTelemetryTests.java index ba6924ba0ff3b..084a7f3de4a53 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverTelemetryTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverTelemetryTests.java @@ -24,7 +24,8 @@ import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; -import org.elasticsearch.xpack.inference.LocalStateInferencePlugin; +import org.elasticsearch.xpack.core.XPackPlugin; +import org.elasticsearch.xpack.inference.InferencePlugin; import org.junit.Before; import java.io.IOException; @@ -46,7 +47,7 @@ protected boolean addMockHttpTransport() { @Override protected Collection> 
nodePlugins() { - return List.of(LocalStateInferencePlugin.class, TextSimilarityTestPlugin.class); + return List.of(InferencePlugin.class, XPackPlugin.class, TextSimilarityTestPlugin.class); } @Override diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankTests.java index f81f2965c392e..a042fca44fdb5 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankTests.java @@ -20,7 +20,7 @@ import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; import org.elasticsearch.xpack.core.inference.action.InferenceAction; -import org.elasticsearch.xpack.inference.LocalStateInferencePlugin; +import org.elasticsearch.xpack.inference.InferencePlugin; import org.junit.Before; import java.util.Collection; @@ -108,7 +108,7 @@ protected InferenceAction.Request generateRequest(List docFeatures) { @Override protected Collection> getPlugins() { - return List.of(LocalStateInferencePlugin.class, TextSimilarityTestPlugin.class); + return List.of(InferencePlugin.class, TextSimilarityTestPlugin.class); } @Before diff --git a/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/46_semantic_text_sparse_vector.yml b/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/46_semantic_text_sparse_vector.yml new file mode 100644 index 0000000000000..f1cff512fd209 --- /dev/null +++ b/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/46_semantic_text_sparse_vector.yml @@ -0,0 +1,249 @@ +setup: + - requires: + cluster_features: "search.semantic_sparse_vector_query_rewrite_interception_supported" + reason: semantic_text sparse_vector support introduced in 8.18.0 + + - do: + inference.put: + task_type: sparse_embedding + inference_id: sparse-inference-id + body: > + { + "service": "test_service", + "service_settings": { + "model": "my_model", + "api_key": "abc64" + }, + "task_settings": { + } + } + + - do: + inference.put: + task_type: sparse_embedding + inference_id: sparse-inference-id-2 + body: > + { + "service": "test_service", + "service_settings": { + "model": "my_model", + "api_key": "abc64" + }, + "task_settings": { + } + } + + - do: + indices.create: + index: test-semantic-text-index + body: + mappings: + properties: + inference_field: + type: semantic_text + inference_id: sparse-inference-id + + - do: + indices.create: + index: test-semantic-text-index-2 + body: + mappings: + properties: + inference_field: + type: semantic_text + inference_id: sparse-inference-id-2 + + - do: + indices.create: + index: test-sparse-vector-index + body: + mappings: + properties: + inference_field: + type: sparse_vector + + - do: + index: + index: test-semantic-text-index + id: doc_1 + body: + inference_field: [ "inference test", "another inference test" ] + refresh: true + + - do: + index: + index: test-semantic-text-index-2 + id: doc_3 + body: + inference_field: [ "inference test", "another inference test" ] + refresh: true + + - do: + index: + index: test-sparse-vector-index + id: doc_2 + body: + inference_field: { "feature_0": 1, "feature_1": 2, "feature_2": 3, "feature_3": 4, "feature_4": 5 } + refresh: true + +--- 
+"Nested sparse_vector queries using the old format on semantic_text embeddings and inference still work": + - skip: + features: [ "headers" ] + + - do: + headers: + # Force JSON content type so that we use a parser that interprets the floating-point score as a double + Content-Type: application/json + search: + index: test-semantic-text-index + body: + query: + nested: + path: inference_field.inference.chunks + query: + sparse_vector: + field: inference_field.inference.chunks.embeddings + inference_id: sparse-inference-id + query: test + + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "doc_1" } + +--- +"Nested sparse_vector queries using the old format on semantic_text embeddings and query vectors still work": + - skip: + features: [ "headers" ] + + - do: + headers: + # Force JSON content type so that we use a parser that interprets the floating-point score as a double + Content-Type: application/json + search: + index: test-semantic-text-index + body: + query: + nested: + path: inference_field.inference.chunks + query: + sparse_vector: + field: inference_field.inference.chunks.embeddings + query_vector: { "feature_0": 1, "feature_1": 2, "feature_2": 3, "feature_3": 4, "feature_4": 5 } + + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "doc_1" } + +--- +"sparse_vector query against semantic_text field using a specified inference ID": + + - do: + search: + index: test-semantic-text-index + body: + query: + sparse_vector: + field: inference_field + inference_id: sparse-inference-id + query: "inference test" + + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "doc_1" } + +--- +"sparse_vector query against semantic_text field using inference ID configured in semantic_text field": + + - do: + search: + index: test-semantic-text-index + body: + query: + sparse_vector: + field: inference_field + query: "inference test" + + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "doc_1" } + +--- +"sparse_vector query against semantic_text field using query vectors": + + - do: + search: + index: test-semantic-text-index + body: + query: + sparse_vector: + field: inference_field + query_vector: { "feature_0": 1, "feature_1": 2, "feature_2": 3, "feature_3": 4, "feature_4": 5 } + + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "doc_1" } + +--- +"sparse_vector query against combined sparse_vector and semantic_text fields using inference": + + - do: + search: + index: + - test-semantic-text-index + - test-sparse-vector-index + body: + query: + sparse_vector: + field: inference_field + inference_id: sparse-inference-id + query: "inference test" + + - match: { hits.total.value: 2 } + +--- +"sparse_vector query against combined sparse_vector and semantic_text fields still requires inference ID": + + - do: + catch: bad_request + search: + index: + - test-semantic-text-index + - test-sparse-vector-index + body: + query: + sparse_vector: + field: inference_field + query: "inference test" + + - match: { error.type: "illegal_argument_exception" } + - match: { error.reason: "inference_id required to perform vector search on query string" } + +--- +"sparse_vector query against combined sparse_vector and semantic_text fields using query vectors": + + - do: + search: + index: + - test-semantic-text-index + - test-sparse-vector-index + body: + query: + sparse_vector: + field: inference_field + query_vector: { "feature_0": 1, "feature_1": 2, "feature_2": 3, "feature_3": 4, "feature_4": 5 } + + - match: { hits.total.value: 2 } + + +--- 
+"sparse_vector query against multiple semantic_text fields with multiple inference IDs specified in semantic_text fields": + + - do: + search: + index: + - test-semantic-text-index + - test-semantic-text-index-2 + body: + query: + sparse_vector: + field: inference_field + query: "inference test" + + - match: { hits.total.value: 2 } + diff --git a/x-pack/plugin/migrate/src/internalClusterTest/java/org/elasticsearch/xpack/migrate/action/CreateIndexFromSourceActionIT.java b/x-pack/plugin/migrate/src/internalClusterTest/java/org/elasticsearch/xpack/migrate/action/CreateIndexFromSourceActionIT.java new file mode 100644 index 0000000000000..b460c6abfeee4 --- /dev/null +++ b/x-pack/plugin/migrate/src/internalClusterTest/java/org/elasticsearch/xpack/migrate/action/CreateIndexFromSourceActionIT.java @@ -0,0 +1,250 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.migrate.action; + +import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; +import org.elasticsearch.action.admin.indices.get.GetIndexRequest; +import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequest; +import org.elasticsearch.action.admin.indices.settings.get.GetSettingsRequest; +import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.MappingMetadata; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.datastreams.DataStreamsPlugin; +import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.reindex.ReindexPlugin; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.transport.MockTransportService; +import org.elasticsearch.xcontent.json.JsonXContent; +import org.elasticsearch.xpack.migrate.MigratePlugin; + +import java.util.Collection; +import java.util.List; +import java.util.Locale; +import java.util.Map; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.xpack.migrate.action.ReindexDataStreamAction.REINDEX_DATA_STREAM_FEATURE_FLAG; + +public class CreateIndexFromSourceActionIT extends ESIntegTestCase { + + @Override + protected Collection> nodePlugins() { + return List.of(MigratePlugin.class, ReindexPlugin.class, MockTransportService.TestPlugin.class, DataStreamsPlugin.class); + } + + public void testDestIndexCreated() throws Exception { + assumeTrue("requires the migration reindex feature flag", REINDEX_DATA_STREAM_FEATURE_FLAG.isEnabled()); + + var sourceIndex = randomAlphaOfLength(20).toLowerCase(Locale.ROOT); + indicesAdmin().create(new CreateIndexRequest(sourceIndex)).get(); + + // create from source + var destIndex = randomAlphaOfLength(20).toLowerCase(Locale.ROOT); + assertAcked( + client().execute(CreateIndexFromSourceAction.INSTANCE, new CreateIndexFromSourceAction.Request(sourceIndex, destIndex)) + ); + + try { + indicesAdmin().getIndex(new GetIndexRequest().indices(destIndex)).actionGet(); + } catch (IndexNotFoundException e) { + fail(); + } + } + + public void testSettingsCopiedFromSource() throws Exception { + 
assumeTrue("requires the migration reindex feature flag", REINDEX_DATA_STREAM_FEATURE_FLAG.isEnabled()); + + // start with a static setting + var numShards = randomIntBetween(1, 10); + var staticSettings = Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numShards).build(); + var sourceIndex = randomAlphaOfLength(20).toLowerCase(Locale.ROOT); + indicesAdmin().create(new CreateIndexRequest(sourceIndex, staticSettings)).get(); + + // update with a dynamic setting + var numReplicas = randomIntBetween(0, 10); + var dynamicSettings = Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numReplicas).build(); + indicesAdmin().updateSettings(new UpdateSettingsRequest(dynamicSettings, sourceIndex)).actionGet(); + + // create from source + var destIndex = randomAlphaOfLength(20).toLowerCase(Locale.ROOT); + assertAcked( + client().execute(CreateIndexFromSourceAction.INSTANCE, new CreateIndexFromSourceAction.Request(sourceIndex, destIndex)) + ); + + // assert both static and dynamic settings set on dest index + var settingsResponse = indicesAdmin().getSettings(new GetSettingsRequest().indices(destIndex)).actionGet(); + assertEquals(numReplicas, Integer.parseInt(settingsResponse.getSetting(destIndex, IndexMetadata.SETTING_NUMBER_OF_REPLICAS))); + assertEquals(numShards, Integer.parseInt(settingsResponse.getSetting(destIndex, IndexMetadata.SETTING_NUMBER_OF_SHARDS))); + } + + public void testMappingsCopiedFromSource() { + assumeTrue("requires the migration reindex feature flag", REINDEX_DATA_STREAM_FEATURE_FLAG.isEnabled()); + + var sourceIndex = randomAlphaOfLength(20).toLowerCase(Locale.ROOT); + String mapping = """ + { + "_doc":{ + "dynamic":"strict", + "properties":{ + "foo1":{ + "type":"text" + } + } + } + } + """; + indicesAdmin().create(new CreateIndexRequest(sourceIndex).mapping(mapping)).actionGet(); + + // create from source + var destIndex = randomAlphaOfLength(20).toLowerCase(Locale.ROOT); + assertAcked( + client().execute(CreateIndexFromSourceAction.INSTANCE, new CreateIndexFromSourceAction.Request(sourceIndex, destIndex)) + ); + + var mappingsResponse = indicesAdmin().getMappings(new GetMappingsRequest().indices(sourceIndex, destIndex)).actionGet(); + Map mappings = mappingsResponse.mappings(); + var destMappings = mappings.get(destIndex).sourceAsMap(); + var sourceMappings = mappings.get(sourceIndex).sourceAsMap(); + + assertEquals(sourceMappings, destMappings); + // sanity check specific value from dest mapping + assertEquals("text", XContentMapValues.extractValue("properties.foo1.type", destMappings)); + } + + public void testSettingsOverridden() throws Exception { + assumeTrue("requires the migration reindex feature flag", REINDEX_DATA_STREAM_FEATURE_FLAG.isEnabled()); + + var numShardsSource = randomIntBetween(1, 10); + var numReplicasSource = randomIntBetween(0, 10); + var sourceIndex = randomAlphaOfLength(20).toLowerCase(Locale.ROOT); + var sourceSettings = Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numShardsSource) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numReplicasSource) + .build(); + indicesAdmin().create(new CreateIndexRequest(sourceIndex, sourceSettings)).get(); + + boolean overrideNumShards = randomBoolean(); + Settings settingsOverride = overrideNumShards + ? 
Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numShardsSource + 1).build() + : Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numReplicasSource + 1).build(); + + // create from source + var destIndex = randomAlphaOfLength(20).toLowerCase(Locale.ROOT); + assertAcked( + client().execute( + CreateIndexFromSourceAction.INSTANCE, + new CreateIndexFromSourceAction.Request(sourceIndex, destIndex, settingsOverride, Map.of()) + ) + ); + + // assert settings overridden + int expectedShards = overrideNumShards ? numShardsSource + 1 : numShardsSource; + int expectedReplicas = overrideNumShards ? numReplicasSource : numReplicasSource + 1; + var settingsResponse = indicesAdmin().getSettings(new GetSettingsRequest().indices(destIndex)).actionGet(); + assertEquals(expectedShards, Integer.parseInt(settingsResponse.getSetting(destIndex, IndexMetadata.SETTING_NUMBER_OF_SHARDS))); + assertEquals(expectedReplicas, Integer.parseInt(settingsResponse.getSetting(destIndex, IndexMetadata.SETTING_NUMBER_OF_REPLICAS))); + } + + public void testSettingsNullOverride() throws Exception { + assumeTrue("requires the migration reindex feature flag", REINDEX_DATA_STREAM_FEATURE_FLAG.isEnabled()); + + var sourceIndex = randomAlphaOfLength(20).toLowerCase(Locale.ROOT); + var sourceSettings = Settings.builder().put(IndexMetadata.SETTING_BLOCKS_WRITE, true).build(); + indicesAdmin().create(new CreateIndexRequest(sourceIndex, sourceSettings)).get(); + + Settings settingsOverride = Settings.builder().putNull(IndexMetadata.SETTING_BLOCKS_WRITE).build(); + + // create from source + var destIndex = randomAlphaOfLength(20).toLowerCase(Locale.ROOT); + assertAcked( + client().execute( + CreateIndexFromSourceAction.INSTANCE, + new CreateIndexFromSourceAction.Request(sourceIndex, destIndex, settingsOverride, Map.of()) + ) + ); + + // assert settings overridden + var settingsResponse = indicesAdmin().getSettings(new GetSettingsRequest().indices(destIndex)).actionGet(); + assertNull(settingsResponse.getSetting(destIndex, IndexMetadata.SETTING_BLOCKS_WRITE)); + } + + public void testMappingsOverridden() { + assumeTrue("requires the migration reindex feature flag", REINDEX_DATA_STREAM_FEATURE_FLAG.isEnabled()); + + var sourceIndex = randomAlphaOfLength(20).toLowerCase(Locale.ROOT); + String sourceMapping = """ + { + "_doc":{ + "dynamic":"strict", + "properties":{ + "foo1":{ + "type":"text" + }, + "foo2":{ + "type":"boolean" + } + } + } + } + """; + indicesAdmin().create(new CreateIndexRequest(sourceIndex).mapping(sourceMapping)).actionGet(); + + String mappingOverrideStr = """ + { + "_doc":{ + "dynamic":"strict", + "properties":{ + "foo1":{ + "type":"integer" + }, + "foo3": { + "type":"keyword" + } + } + } + } + """; + var mappingOverride = XContentHelper.convertToMap(JsonXContent.jsonXContent, mappingOverrideStr, false); + + // create from source + var destIndex = randomAlphaOfLength(20).toLowerCase(Locale.ROOT); + assertAcked( + client().execute( + CreateIndexFromSourceAction.INSTANCE, + new CreateIndexFromSourceAction.Request(sourceIndex, destIndex, Settings.EMPTY, mappingOverride) + ) + ); + + var mappingsResponse = indicesAdmin().getMappings(new GetMappingsRequest().indices(destIndex)).actionGet(); + Map mappings = mappingsResponse.mappings(); + var destMappings = mappings.get(destIndex).sourceAsMap(); + + String expectedMappingStr = """ + { + "dynamic":"strict", + "properties":{ + "foo1":{ + "type":"integer" + }, + "foo2": { + "type":"boolean" + }, + "foo3": { + "type":"keyword" + } + } + } + """; + 
var expectedMapping = XContentHelper.convertToMap(JsonXContent.jsonXContent, expectedMappingStr, false); + assertEquals(expectedMapping, destMappings); + } +} diff --git a/x-pack/plugin/migrate/src/internalClusterTest/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamTransportActionIT.java b/x-pack/plugin/migrate/src/internalClusterTest/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamTransportActionIT.java index 6e24e644cb2af..b32a6efb854d7 100644 --- a/x-pack/plugin/migrate/src/internalClusterTest/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamTransportActionIT.java +++ b/x-pack/plugin/migrate/src/internalClusterTest/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamTransportActionIT.java @@ -29,7 +29,7 @@ import org.elasticsearch.xpack.migrate.MigratePlugin; import org.elasticsearch.xpack.migrate.action.ReindexDataStreamAction.ReindexDataStreamRequest; import org.elasticsearch.xpack.migrate.action.ReindexDataStreamAction.ReindexDataStreamResponse; -import org.elasticsearch.xpack.migrate.task.ReindexDataStreamStatus; +import org.elasticsearch.xpack.migrate.task.ReindexDataStreamEnrichedStatus; import org.elasticsearch.xpack.migrate.task.ReindexDataStreamTask; import java.util.Collection; @@ -37,6 +37,7 @@ import java.util.Locale; import java.util.Map; import java.util.Optional; +import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicReference; @@ -100,7 +101,7 @@ public void testAlreadyUpToDateDataStream() throws Exception { assertThat(task.getStatus().complete(), equalTo(true)); assertNull(task.getStatus().exception()); assertThat(task.getStatus().pending(), equalTo(0)); - assertThat(task.getStatus().inProgress(), equalTo(0)); + assertThat(task.getStatus().inProgress(), equalTo(Set.of())); assertThat(task.getStatus().errors().size(), equalTo(0)); assertBusy(() -> { @@ -108,12 +109,12 @@ public void testAlreadyUpToDateDataStream() throws Exception { new ActionType(GetMigrationReindexStatusAction.NAME), new GetMigrationReindexStatusAction.Request(dataStreamName) ).actionGet(); - ReindexDataStreamStatus status = (ReindexDataStreamStatus) statusResponse.getTask().getTask().status(); + ReindexDataStreamEnrichedStatus status = statusResponse.getEnrichedStatus(); assertThat(status.complete(), equalTo(true)); assertThat(status.errors(), equalTo(List.of())); assertThat(status.exception(), equalTo(null)); assertThat(status.pending(), equalTo(0)); - assertThat(status.inProgress(), equalTo(0)); + assertThat(status.inProgress().size(), equalTo(0)); assertThat(status.totalIndices(), equalTo(backingIndexCount)); assertThat(status.totalIndicesToBeUpgraded(), equalTo(0)); }); diff --git a/x-pack/plugin/migrate/src/internalClusterTest/java/org/elasticsearch/xpack/migrate/action/ReindexDatastreamIndexIT.java b/x-pack/plugin/migrate/src/internalClusterTest/java/org/elasticsearch/xpack/migrate/action/ReindexDatastreamIndexTransportActionIT.java similarity index 98% rename from x-pack/plugin/migrate/src/internalClusterTest/java/org/elasticsearch/xpack/migrate/action/ReindexDatastreamIndexIT.java rename to x-pack/plugin/migrate/src/internalClusterTest/java/org/elasticsearch/xpack/migrate/action/ReindexDatastreamIndexTransportActionIT.java index e492f035da866..0ca58ecf0f0d5 100644 --- a/x-pack/plugin/migrate/src/internalClusterTest/java/org/elasticsearch/xpack/migrate/action/ReindexDatastreamIndexIT.java +++ 
b/x-pack/plugin/migrate/src/internalClusterTest/java/org/elasticsearch/xpack/migrate/action/ReindexDatastreamIndexTransportActionIT.java @@ -53,7 +53,7 @@ import static org.elasticsearch.xpack.migrate.action.ReindexDataStreamAction.REINDEX_DATA_STREAM_FEATURE_FLAG; import static org.hamcrest.Matchers.equalTo; -public class ReindexDatastreamIndexIT extends ESIntegTestCase { +public class ReindexDatastreamIndexTransportActionIT extends ESIntegTestCase { private static final String MAPPING = """ { @@ -126,12 +126,14 @@ public void testDestIndexContainsDocs() throws Exception { assertHitCount(prepareSearch(response.getDestIndex()).setSize(0), numDocs); } - public void testSetSourceToReadOnly() throws Exception { + public void testSetSourceToBlockWrites() throws Exception { assumeTrue("requires the migration reindex feature flag", REINDEX_DATA_STREAM_FEATURE_FLAG.isEnabled()); + var settings = randomBoolean() ? Settings.builder().put(IndexMetadata.SETTING_BLOCKS_WRITE, true).build() : Settings.EMPTY; + // empty source index var sourceIndex = randomAlphaOfLength(20).toLowerCase(Locale.ROOT); - indicesAdmin().create(new CreateIndexRequest(sourceIndex)).get(); + indicesAdmin().create(new CreateIndexRequest(sourceIndex, settings)).get(); // call reindex client().execute(ReindexDataStreamIndexAction.INSTANCE, new ReindexDataStreamIndexAction.Request(sourceIndex)).actionGet(); diff --git a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/MigratePlugin.java b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/MigratePlugin.java index f42d05727b9fd..d9dffdefafa2c 100644 --- a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/MigratePlugin.java +++ b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/MigratePlugin.java @@ -34,6 +34,8 @@ import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xpack.migrate.action.CancelReindexDataStreamAction; import org.elasticsearch.xpack.migrate.action.CancelReindexDataStreamTransportAction; +import org.elasticsearch.xpack.migrate.action.CreateIndexFromSourceAction; +import org.elasticsearch.xpack.migrate.action.CreateIndexFromSourceTransportAction; import org.elasticsearch.xpack.migrate.action.GetMigrationReindexStatusAction; import org.elasticsearch.xpack.migrate.action.GetMigrationReindexStatusTransportAction; import org.elasticsearch.xpack.migrate.action.ReindexDataStreamAction; @@ -87,6 +89,7 @@ public List getRestHandlers( actions.add(new ActionHandler<>(GetMigrationReindexStatusAction.INSTANCE, GetMigrationReindexStatusTransportAction.class)); actions.add(new ActionHandler<>(CancelReindexDataStreamAction.INSTANCE, CancelReindexDataStreamTransportAction.class)); actions.add(new ActionHandler<>(ReindexDataStreamIndexAction.INSTANCE, ReindexDataStreamIndexTransportAction.class)); + actions.add(new ActionHandler<>(CreateIndexFromSourceAction.INSTANCE, CreateIndexFromSourceTransportAction.class)); } return actions; } diff --git a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/CreateIndexFromSourceAction.java b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/CreateIndexFromSourceAction.java new file mode 100644 index 0000000000000..d67eaee3d251f --- /dev/null +++ b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/CreateIndexFromSourceAction.java @@ -0,0 +1,117 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ +package org.elasticsearch.xpack.migrate.action; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionType; +import org.elasticsearch.action.IndicesRequest; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.settings.Settings; + +import java.io.IOException; +import java.util.Map; +import java.util.Objects; + +public class CreateIndexFromSourceAction extends ActionType { + + public static final String NAME = "indices:admin/index/create_from_source"; + + public static final ActionType INSTANCE = new CreateIndexFromSourceAction(); + + private CreateIndexFromSourceAction() { + super(NAME); + } + + public static class Request extends ActionRequest implements IndicesRequest { + + private final String sourceIndex; + private final String destIndex; + private final Settings settingsOverride; + private final Map mappingsOverride; + + public Request(String sourceIndex, String destIndex) { + this(sourceIndex, destIndex, Settings.EMPTY, Map.of()); + } + + public Request(String sourceIndex, String destIndex, Settings settingsOverride, Map mappingsOverride) { + Objects.requireNonNull(mappingsOverride); + this.sourceIndex = sourceIndex; + this.destIndex = destIndex; + this.settingsOverride = settingsOverride; + this.mappingsOverride = mappingsOverride; + } + + @SuppressWarnings("unchecked") + public Request(StreamInput in) throws IOException { + super(in); + this.sourceIndex = in.readString(); + this.destIndex = in.readString(); + this.settingsOverride = Settings.readSettingsFromStream(in); + this.mappingsOverride = (Map) in.readGenericValue(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(sourceIndex); + out.writeString(destIndex); + settingsOverride.writeTo(out); + out.writeGenericValue(mappingsOverride); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + public String getSourceIndex() { + return sourceIndex; + } + + public String getDestIndex() { + return destIndex; + } + + public Settings getSettingsOverride() { + return settingsOverride; + } + + public Map getMappingsOverride() { + return mappingsOverride; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Request request = (Request) o; + return Objects.equals(sourceIndex, request.sourceIndex) + && Objects.equals(destIndex, request.destIndex) + && Objects.equals(settingsOverride, request.settingsOverride) + && Objects.equals(mappingsOverride, request.mappingsOverride); + } + + @Override + public int hashCode() { + return Objects.hash(sourceIndex, destIndex, settingsOverride, mappingsOverride); + } + + @Override + public String[] indices() { + return new String[] { sourceIndex }; + } + + @Override + public IndicesOptions indicesOptions() { + return IndicesOptions.strictSingleIndexNoExpandForbidClosed(); + } + } +} diff --git a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/CreateIndexFromSourceTransportAction.java 
b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/CreateIndexFromSourceTransportAction.java new file mode 100644 index 0000000000000..968b2220628a9 --- /dev/null +++ b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/CreateIndexFromSourceTransportAction.java @@ -0,0 +1,126 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ +package org.elasticsearch.xpack.migrate.action; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.MappingMetadata; +import org.elasticsearch.cluster.metadata.MetadataCreateIndexService; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.compress.CompressedXContent; +import org.elasticsearch.common.settings.IndexScopedSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.injection.guice.Inject; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xcontent.XContentType; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; +import java.util.Optional; + +public class CreateIndexFromSourceTransportAction extends HandledTransportAction< + CreateIndexFromSourceAction.Request, + AcknowledgedResponse> { + private static final Logger logger = LogManager.getLogger(CreateIndexFromSourceTransportAction.class); + + private final ClusterService clusterService; + private final Client client; + private final IndexScopedSettings indexScopedSettings; + + @Inject + public CreateIndexFromSourceTransportAction( + TransportService transportService, + ClusterService clusterService, + ActionFilters actionFilters, + Client client, + IndexScopedSettings indexScopedSettings + ) { + super( + CreateIndexFromSourceAction.NAME, + false, + transportService, + actionFilters, + CreateIndexFromSourceAction.Request::new, + transportService.getThreadPool().executor(ThreadPool.Names.GENERIC) + ); + this.clusterService = clusterService; + this.client = client; + this.indexScopedSettings = indexScopedSettings; + } + + @Override + protected void doExecute(Task task, CreateIndexFromSourceAction.Request request, ActionListener listener) { + + IndexMetadata sourceIndex = clusterService.state().getMetadata().index(request.getSourceIndex()); + + if (sourceIndex == null) { + listener.onFailure(new IndexNotFoundException(request.getSourceIndex())); + return; + } + + logger.debug("Creating destination index [{}] for source index [{}]", request.getDestIndex(), request.getSourceIndex()); + + Settings settings = Settings.builder() + // add source settings + .put(filterSettings(sourceIndex)) + // add 
override settings from request + .put(request.getSettingsOverride()) + .build(); + + Map mergeMappings; + try { + mergeMappings = mergeMappings(sourceIndex.mapping(), request.getMappingsOverride()); + } catch (IOException e) { + listener.onFailure(e); + return; + } + + var createIndexRequest = new CreateIndexRequest(request.getDestIndex()).settings(settings); + if (mergeMappings.isEmpty() == false) { + createIndexRequest.mapping(mergeMappings); + } + + client.admin().indices().create(createIndexRequest, listener.map(response -> response)); + } + + private static Map toMap(@Nullable MappingMetadata sourceMapping) { + return Optional.ofNullable(sourceMapping) + .map(MappingMetadata::source) + .map(CompressedXContent::uncompressed) + .map(s -> XContentHelper.convertToMap(s, true, XContentType.JSON).v2()) + .orElse(Map.of()); + } + + private static Map mergeMappings(@Nullable MappingMetadata sourceMapping, Map mappingAddition) + throws IOException { + Map combinedMappingMap = new HashMap<>(toMap(sourceMapping)); + XContentHelper.update(combinedMappingMap, mappingAddition, true); + return combinedMappingMap; + } + + // Filter source index settings to subset of settings that can be included during reindex. + // Similar to the settings filtering done when reindexing for upgrade in Kibana + // https://github.com/elastic/kibana/blob/8a8363f02cc990732eb9cbb60cd388643a336bed/x-pack + // /plugins/upgrade_assistant/server/lib/reindexing/index_settings.ts#L155 + private Settings filterSettings(IndexMetadata sourceIndex) { + return MetadataCreateIndexService.copySettingsFromSource(false, sourceIndex.getSettings(), indexScopedSettings, Settings.builder()) + .build(); + } +} diff --git a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/GetMigrationReindexStatusAction.java b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/GetMigrationReindexStatusAction.java index 68ccaef4bf02c..bc084f3e0b5d6 100644 --- a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/GetMigrationReindexStatusAction.java +++ b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/GetMigrationReindexStatusAction.java @@ -16,10 +16,9 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.tasks.Task; -import org.elasticsearch.tasks.TaskResult; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xpack.migrate.task.ReindexDataStreamEnrichedStatus; import java.io.IOException; import java.util.Objects; @@ -36,46 +35,43 @@ public GetMigrationReindexStatusAction() { } public static class Response extends ActionResponse implements ToXContentObject { - private final TaskResult task; + private final ReindexDataStreamEnrichedStatus enrichedStatus; - public Response(TaskResult task) { - this.task = requireNonNull(task, "task is required"); + public Response(ReindexDataStreamEnrichedStatus enrichedStatus) { + this.enrichedStatus = requireNonNull(enrichedStatus, "status is required"); } public Response(StreamInput in) throws IOException { super(in); - task = in.readOptionalWriteable(TaskResult::new); + enrichedStatus = in.readOptionalWriteable(ReindexDataStreamEnrichedStatus::new); } @Override public void writeTo(StreamOutput out) throws IOException { - out.writeOptionalWriteable(task); + out.writeOptionalWriteable(enrichedStatus); } /** * Get the actual 
result of the fetch. */ - public TaskResult getTask() { - return task; + public ReindexDataStreamEnrichedStatus getEnrichedStatus() { + return enrichedStatus; } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - Task.Status status = task.getTask().status(); - if (status != null) { - task.getTask().status().toXContent(builder, params); - } + enrichedStatus.toXContent(builder, params); return builder; } @Override public int hashCode() { - return Objects.hashCode(task); + return Objects.hashCode(enrichedStatus); } @Override public boolean equals(Object other) { - return other instanceof Response && task.equals(((Response) other).task); + return other instanceof Response && enrichedStatus.equals(((Response) other).enrichedStatus); } @Override diff --git a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/GetMigrationReindexStatusTransportAction.java b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/GetMigrationReindexStatusTransportAction.java index ca81a03fc5630..64864491191e5 100644 --- a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/GetMigrationReindexStatusTransportAction.java +++ b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/GetMigrationReindexStatusTransportAction.java @@ -11,40 +11,55 @@ import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListenerResponseHandler; +import org.elasticsearch.action.admin.indices.stats.IndexStats; +import org.elasticsearch.action.admin.indices.stats.IndicesStatsAction; +import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest; +import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.Strings; +import org.elasticsearch.core.Tuple; +import org.elasticsearch.index.shard.DocsStats; import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.persistent.AllocatedPersistentTask; import org.elasticsearch.persistent.PersistentTasksCustomMetadata; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskInfo; -import org.elasticsearch.tasks.TaskResult; import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.migrate.action.GetMigrationReindexStatusAction.Request; import org.elasticsearch.xpack.migrate.action.GetMigrationReindexStatusAction.Response; +import org.elasticsearch.xpack.migrate.task.ReindexDataStreamEnrichedStatus; +import org.elasticsearch.xpack.migrate.task.ReindexDataStreamStatus; +import java.util.HashMap; import java.util.Map; import java.util.Optional; +import java.util.Set; +import java.util.stream.Stream; public class GetMigrationReindexStatusTransportAction extends HandledTransportAction { private final ClusterService clusterService; private final TransportService transportService; + private final Client client; @Inject public GetMigrationReindexStatusTransportAction( ClusterService 
clusterService, TransportService transportService, - ActionFilters actionFilters + ActionFilters actionFilters, + Client client ) { super(GetMigrationReindexStatusAction.NAME, transportService, actionFilters, Request::new, EsExecutors.DIRECT_EXECUTOR_SERVICE); this.clusterService = clusterService; this.transportService = transportService; + this.client = client; } @Override @@ -60,9 +75,9 @@ protected void doExecute(Task task, Request request, ActionListener li } else if (persistentTask.isAssigned()) { String nodeId = persistentTask.getExecutorNode(); if (clusterService.localNode().getId().equals(nodeId)) { - getRunningTaskFromNode(persistentTaskId, listener); + fetchAndReportStatusForTaskOnThisNode(persistentTaskId, listener); } else { - runOnNodeWithTaskIfPossible(task, request, nodeId, listener); + fetchAndReportStatusForTaskOnRemoteNode(task, request, nodeId, listener); } } else { listener.onFailure(new ElasticsearchException("Persistent task with id [{}] is not assigned to a node", persistentTaskId)); @@ -82,7 +97,7 @@ private Task getRunningPersistentTaskFromTaskManager(String persistentTaskId) { return optionalTask.map(Map.Entry::getValue).orElse(null); } - void getRunningTaskFromNode(String persistentTaskId, ActionListener listener) { + void fetchAndReportStatusForTaskOnThisNode(String persistentTaskId, ActionListener listener) { Task runningTask = getRunningPersistentTaskFromTaskManager(persistentTaskId); if (runningTask == null) { listener.onFailure( @@ -96,11 +111,97 @@ void getRunningTaskFromNode(String persistentTaskId, ActionListener li ); } else { TaskInfo info = runningTask.taskInfo(clusterService.localNode().getId(), true); - listener.onResponse(new Response(new TaskResult(false, info))); + ReindexDataStreamStatus status = (ReindexDataStreamStatus) info.status(); + Set inProgressIndices = status.inProgress(); + if (inProgressIndices.isEmpty()) { + // We have no reason to fetch index stats since there are no in progress indices + reportStatus(Map.of(), status, listener); + } else { + fetchInProgressStatsAndReportStatus(inProgressIndices, status, listener); + } } } - private void runOnNodeWithTaskIfPossible(Task thisTask, Request request, String nodeId, ActionListener listener) { + /* + * The status is enriched with the information from inProgressMap to create a new ReindexDataStreamEnrichedStatus, which is used in the + * response sent to the listener. + */ + private void reportStatus( + Map> inProgressMap, + ReindexDataStreamStatus status, + ActionListener listener + ) { + ReindexDataStreamEnrichedStatus enrichedStatus = new ReindexDataStreamEnrichedStatus( + status.persistentTaskStartTime(), + status.totalIndices(), + status.totalIndicesToBeUpgraded(), + status.complete(), + status.exception(), + inProgressMap, + status.pending(), + status.errors() ); + listener.onResponse(new Response(enrichedStatus)); + } + + /* + * This method fetches doc counts for all indices in inProgressIndices (and the indices they are being reindexed into). After + * successfully fetching those, reportStatus is called.
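+ * As an illustration only (hypothetical index names and doc counts, not taken from this change), the map handed to reportStatus
+ * pairs each in-progress source index with a tuple of (total docs in the source index, docs reindexed into the destination so far):
+ *
+ *     Map<String, Tuple<Long, Long>> inProgressMap = Map.of(
+ *         "my-index-1", Tuple.tuple(1000L, 250L),  // reindex roughly a quarter done
+ *         "my-index-2", Tuple.tuple(500L, 500L)    // all docs copied, completion not yet reported
+ *     );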
+ */ + private void fetchInProgressStatsAndReportStatus( + Set inProgressIndices, + ReindexDataStreamStatus status, + ActionListener listener + ) { + IndicesStatsRequest indicesStatsRequest = new IndicesStatsRequest(); + String[] indices = inProgressIndices.stream() + .flatMap(index -> Stream.of(index, ReindexDataStreamIndexTransportAction.generateDestIndexName(index))) + .toList() + .toArray(new String[0]); + indicesStatsRequest.indices(indices); + /* + * It is possible that the destination index will not exist yet, so we want to ignore the fact that it is missing + */ + indicesStatsRequest.indicesOptions(IndicesOptions.fromOptions(true, true, true, true)); + client.execute(IndicesStatsAction.INSTANCE, indicesStatsRequest, new ActionListener() { + @Override + public void onResponse(IndicesStatsResponse indicesStatsResponse) { + Map> inProgressMap = new HashMap<>(); + for (String index : inProgressIndices) { + IndexStats sourceIndexStats = indicesStatsResponse.getIndex(index); + final long totalDocsInIndex; + if (sourceIndexStats == null) { + totalDocsInIndex = 0; + } else { + DocsStats totalDocsStats = sourceIndexStats.getTotal().getDocs(); + totalDocsInIndex = totalDocsStats == null ? 0 : totalDocsStats.getCount(); + } + IndexStats migratedIndexStats = indicesStatsResponse.getIndex( + ReindexDataStreamIndexTransportAction.generateDestIndexName(index) + ); + final long reindexedDocsInIndex; + if (migratedIndexStats == null) { + reindexedDocsInIndex = 0; + } else { + DocsStats reindexedDocsStats = migratedIndexStats.getTotal().getDocs(); + reindexedDocsInIndex = reindexedDocsStats == null ? 0 : reindexedDocsStats.getCount(); + } + inProgressMap.put(index, Tuple.tuple(totalDocsInIndex, reindexedDocsInIndex)); + } + reportStatus(inProgressMap, status, listener); + } + + @Override + public void onFailure(Exception e) { + listener.onFailure(e); + } + }); + } + + /* + * The task and its status exist on some other node, so this method forwards the request to that node. 
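+ * A minimal sketch of the forwarding pattern, assuming the usual transport plumbing (the exact request options and executor are
+ * implementation details of this action and may differ):
+ *
+ *     DiscoveryNode node = clusterService.state().nodes().get(nodeId);
+ *     transportService.sendRequest(node, GetMigrationReindexStatusAction.NAME, request, TransportRequestOptions.EMPTY,
+ *         new ActionListenerResponseHandler<>(listener, Response::new, EsExecutors.DIRECT_EXECUTOR_SERVICE));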
+ */ + private void fetchAndReportStatusForTaskOnRemoteNode(Task thisTask, Request request, String nodeId, ActionListener listener) { DiscoveryNode node = clusterService.state().nodes().get(nodeId); if (node == null) { listener.onFailure( diff --git a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamIndexAction.java b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamIndexAction.java index 00c81fdc9fbc6..2e3fd1b76ed32 100644 --- a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamIndexAction.java +++ b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamIndexAction.java @@ -41,10 +41,6 @@ public Request(StreamInput in) throws IOException { this.sourceIndex = in.readString(); } - public static Request readFrom(StreamInput in) throws IOException { - return new Request(in); - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamIndexTransportAction.java b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamIndexTransportAction.java index 8863c45691c92..165fd61ae6599 100644 --- a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamIndexTransportAction.java +++ b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamIndexTransportAction.java @@ -10,10 +10,10 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; -import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; -import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest; +import org.elasticsearch.action.admin.indices.readonly.AddIndexBlockRequest; +import org.elasticsearch.action.admin.indices.readonly.AddIndexBlockResponse; +import org.elasticsearch.action.admin.indices.readonly.TransportAddIndexBlockAction; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.action.support.IndicesOptions; @@ -22,9 +22,7 @@ import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.metadata.IndexMetadata; -import org.elasticsearch.cluster.metadata.MetadataCreateIndexService; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.reindex.BulkByScrollResponse; @@ -37,28 +35,25 @@ import java.util.Locale; import java.util.Map; -import java.util.Set; + +import static org.elasticsearch.cluster.metadata.IndexMetadata.APIBlock.READ_ONLY; +import static org.elasticsearch.cluster.metadata.IndexMetadata.APIBlock.WRITE; public class ReindexDataStreamIndexTransportAction extends HandledTransportAction< ReindexDataStreamIndexAction.Request, ReindexDataStreamIndexAction.Response> { private static final Logger logger = LogManager.getLogger(ReindexDataStreamIndexTransportAction.class); - - private static final Set 
SETTINGS_TO_ADD_BACK = Set.of(IndexMetadata.SETTING_BLOCKS_WRITE, IndexMetadata.SETTING_READ_ONLY); - private static final IndicesOptions IGNORE_MISSING_OPTIONS = IndicesOptions.fromOptions(true, true, false, false); private final ClusterService clusterService; private final Client client; - private final IndexScopedSettings indexScopedSettings; @Inject public ReindexDataStreamIndexTransportAction( TransportService transportService, ClusterService clusterService, ActionFilters actionFilters, - Client client, - IndexScopedSettings indexScopedSettings + Client client ) { super( ReindexDataStreamIndexAction.NAME, @@ -70,7 +65,6 @@ public ReindexDataStreamIndexTransportAction( ); this.clusterService = clusterService; this.client = client; - this.indexScopedSettings = indexScopedSettings; } @Override @@ -96,20 +90,19 @@ protected void doExecute( SubscribableListener.newForked(l -> setBlockWrites(sourceIndexName, l)) .andThen(l -> deleteDestIfExists(destIndexName, l)) - .andThen(l -> createIndex(sourceIndex, destIndexName, l)) + .andThen(l -> createIndex(sourceIndex, destIndexName, l)) .andThen(l -> reindex(sourceIndexName, destIndexName, l)) - .andThen(l -> updateSettings(settingsBefore, destIndexName, l)) + .andThen(l -> addBlockIfFromSource(READ_ONLY, settingsBefore, destIndexName, l)) + .andThen(l -> addBlockIfFromSource(WRITE, settingsBefore, destIndexName, l)) .andThenApply(ignored -> new ReindexDataStreamIndexAction.Response(destIndexName)) .addListener(listener); } private void setBlockWrites(String sourceIndexName, ActionListener listener) { logger.debug("Setting write block on source index [{}]", sourceIndexName); - final Settings readOnlySettings = Settings.builder().put(IndexMetadata.INDEX_BLOCKS_WRITE_SETTING.getKey(), true).build(); - var updateSettingsRequest = new UpdateSettingsRequest(readOnlySettings, sourceIndexName); - client.admin().indices().updateSettings(updateSettingsRequest, new ActionListener<>() { + addBlockToIndex(WRITE, sourceIndexName, new ActionListener<>() { @Override - public void onResponse(AcknowledgedResponse response) { + public void onResponse(AddIndexBlockResponse response) { if (response.isAcknowledged()) { listener.onResponse(null); } else { @@ -121,7 +114,7 @@ public void onResponse(AcknowledgedResponse response) { @Override public void onFailure(Exception e) { if (e instanceof ClusterBlockException || e.getCause() instanceof ClusterBlockException) { - // It's fine if read-only is already set + // It's fine if block-writes is already set listener.onResponse(null); } else { listener.onFailure(e); @@ -138,18 +131,23 @@ private void deleteDestIfExists(String destIndexName, ActionListener listener) { + private void createIndex(IndexMetadata sourceIndex, String destIndexName, ActionListener listener) { logger.debug("Creating destination index [{}] for source index [{}]", destIndexName, sourceIndex.getIndex().getName()); - // Create destination with subset of source index settings that can be added before reindex - var settings = getPreSettings(sourceIndex); - - var sourceMapping = sourceIndex.mapping(); - Map mapping = sourceMapping != null ? 
sourceMapping.rawSourceAsMap() : Map.of(); - var createIndexRequest = new CreateIndexRequest(destIndexName).settings(settings).mapping(mapping); - - var errorMessage = String.format(Locale.ROOT, "Could not create index [%s]", destIndexName); - client.admin().indices().create(createIndexRequest, failIfNotAcknowledged(listener, errorMessage)); + // override read-only settings if they exist + var removeReadOnlyOverride = Settings.builder() + .putNull(IndexMetadata.SETTING_READ_ONLY) + .putNull(IndexMetadata.SETTING_BLOCKS_WRITE) + .build(); + + var request = new CreateIndexFromSourceAction.Request( + sourceIndex.getIndex().getName(), + destIndexName, + removeReadOnlyOverride, + Map.of() + ); + var errorMessage = String.format(Locale.ROOT, "Could not create index [%s]", request.getDestIndex()); + client.execute(CreateIndexFromSourceAction.INSTANCE, request, failIfNotAcknowledged(listener, errorMessage)); } private void reindex(String sourceIndexName, String destIndexName, ActionListener listener) { @@ -162,35 +160,18 @@ private void reindex(String sourceIndexName, String destIndexName, ActionListene client.execute(ReindexAction.INSTANCE, reindexRequest, listener); } - private void updateSettings(Settings settingsBefore, String destIndexName, ActionListener listener) { - logger.debug("Adding settings from source index that could not be added before reindex"); - - Settings postSettings = getPostSettings(settingsBefore); - if (postSettings.isEmpty()) { + private void addBlockIfFromSource( + IndexMetadata.APIBlock block, + Settings settingsBefore, + String destIndexName, + ActionListener listener + ) { + if (settingsBefore.getAsBoolean(block.settingName(), false)) { + var errorMessage = String.format(Locale.ROOT, "Add [%s] block to index [%s] was not acknowledged", block.name(), destIndexName); + addBlockToIndex(block, destIndexName, failIfNotAcknowledged(listener, errorMessage)); + } else { listener.onResponse(null); - return; } - - var updateSettingsRequest = new UpdateSettingsRequest(postSettings, destIndexName); - var errorMessage = String.format(Locale.ROOT, "Could not update settings on index [%s]", destIndexName); - client.admin().indices().updateSettings(updateSettingsRequest, failIfNotAcknowledged(listener, errorMessage)); - } - - // Filter source index settings to subset of settings that can be included during reindex. 
- // Similar to the settings filtering done when reindexing for upgrade in Kibana - // https://github.com/elastic/kibana/blob/8a8363f02cc990732eb9cbb60cd388643a336bed/x-pack - // /plugins/upgrade_assistant/server/lib/reindexing/index_settings.ts#L155 - private Settings getPreSettings(IndexMetadata sourceIndex) { - // filter settings that will be added back later - var filtered = sourceIndex.getSettings().filter(settingName -> SETTINGS_TO_ADD_BACK.contains(settingName) == false); - - // filter private and non-copyable settings - var builder = MetadataCreateIndexService.copySettingsFromSource(false, filtered, indexScopedSettings, Settings.builder()); - return builder.build(); - } - - private Settings getPostSettings(Settings settingsBefore) { - return settingsBefore.filter(SETTINGS_TO_ADD_BACK::contains); } public static String generateDestIndexName(String sourceIndex) { @@ -201,11 +182,16 @@ private static ActionListener failIfNotAckno ActionListener listener, String errorMessage ) { - return listener.delegateFailureAndWrap((delegate, response) -> { + return listener.delegateFailure((delegate, response) -> { if (response.isAcknowledged()) { delegate.onResponse(null); + } else { + delegate.onFailure(new ElasticsearchException(errorMessage)); } - throw new ElasticsearchException(errorMessage); }); } + + private void addBlockToIndex(IndexMetadata.APIBlock block, String index, ActionListener listener) { + client.admin().indices().execute(TransportAddIndexBlockAction.TYPE, new AddIndexBlockRequest(block, index), listener); + } } diff --git a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamEnrichedStatus.java b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamEnrichedStatus.java new file mode 100644 index 0000000000000..9dbe1f0c8eebc --- /dev/null +++ b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamEnrichedStatus.java @@ -0,0 +1,104 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.migrate.task; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.core.Tuple; +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +/* + * This class represents information similar to that in ReindexDataStreamStatus, but enriched from other sources besides just the task + * itself. 
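+ * A hypothetical instance, with values chosen only to illustrate the field semantics: a data stream of 12 indices, 10 of which
+ * require upgrading, one reindex in flight, four pending and none failed (so five are already done):
+ *
+ *     new ReindexDataStreamEnrichedStatus(startTimeMillis, 12, 10, false, null,
+ *         Map.of("idx-6", Tuple.tuple(1000L, 400L)), 4, List.of());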
+ */ +public record ReindexDataStreamEnrichedStatus( + long persistentTaskStartTime, + int totalIndices, + int totalIndicesToBeUpgraded, + boolean complete, + Exception exception, + Map> inProgress, + int pending, + List> errors +) implements ToXContentObject, Writeable { + public ReindexDataStreamEnrichedStatus { + Objects.requireNonNull(inProgress); + Objects.requireNonNull(errors); + } + + public ReindexDataStreamEnrichedStatus(StreamInput in) throws IOException { + this( + in.readLong(), + in.readInt(), + in.readInt(), + in.readBoolean(), + in.readException(), + in.readMap(StreamInput::readString, in2 -> Tuple.tuple(in2.readLong(), in2.readLong())), + in.readInt(), + in.readCollectionAsList(in1 -> Tuple.tuple(in1.readString(), in1.readException())) + ); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeLong(persistentTaskStartTime); + out.writeInt(totalIndices); + out.writeInt(totalIndicesToBeUpgraded); + out.writeBoolean(complete); + out.writeException(exception); + out.writeMap(inProgress, StreamOutput::writeString, (out2, tuple) -> { + out2.writeLong(tuple.v1()); + out2.writeLong(tuple.v2()); + }); + out.writeInt(pending); + out.writeCollection(errors, (out1, tuple) -> { + out1.writeString(tuple.v1()); + out1.writeException(tuple.v2()); + }); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.timestampFieldsFromUnixEpochMillis("start_time_millis", "start_time", persistentTaskStartTime); + builder.field("complete", complete); + builder.field("total_indices_in_data_stream", totalIndices); + builder.field("total_indices_requiring_upgrade", totalIndicesToBeUpgraded); + builder.field("successes", totalIndicesToBeUpgraded - (inProgress.size() + pending + errors.size())); + builder.startArray("in_progress"); + for (Map.Entry> inProgressEntry : inProgress.entrySet()) { + builder.startObject(); + builder.field("index", inProgressEntry.getKey()); + builder.field("total_doc_count", inProgressEntry.getValue().v1()); + builder.field("reindexed_doc_count", inProgressEntry.getValue().v2()); + builder.endObject(); + } + builder.endArray(); + builder.field("pending", pending); + builder.startArray("errors"); + for (Tuple error : errors) { + builder.startObject(); + builder.field("index", error.v1()); + builder.field("message", error.v2() == null ? 
"unknown" : error.v2().getMessage()); + builder.endObject(); + } + builder.endArray(); + if (exception != null) { + builder.field("exception", exception.getMessage()); + } + builder.endObject(); + return builder; + } +} diff --git a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamPersistentTaskExecutor.java b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamPersistentTaskExecutor.java index 176220a1ccae8..494be303980a7 100644 --- a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamPersistentTaskExecutor.java +++ b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamPersistentTaskExecutor.java @@ -78,9 +78,9 @@ protected void nodeOperation(AllocatedPersistentTask task, ReindexDataStreamTask .toList(); reindexDataStreamTask.setPendingIndicesCount(indicesToBeReindexed.size()); for (Index index : indicesToBeReindexed) { - reindexDataStreamTask.incrementInProgressIndicesCount(); + reindexDataStreamTask.incrementInProgressIndicesCount(index.getName()); // TODO This is just a placeholder. This is where the real data stream reindex logic will go - reindexDataStreamTask.reindexSucceeded(); + reindexDataStreamTask.reindexSucceeded(index.getName()); } completeSuccessfulPersistentTask(reindexDataStreamTask); diff --git a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamStatus.java b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamStatus.java index 358062550b50a..632aea076ea5a 100644 --- a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamStatus.java +++ b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamStatus.java @@ -16,6 +16,7 @@ import java.io.IOException; import java.util.List; import java.util.Objects; +import java.util.Set; public record ReindexDataStreamStatus( long persistentTaskStartTime, @@ -23,11 +24,12 @@ public record ReindexDataStreamStatus( int totalIndicesToBeUpgraded, boolean complete, Exception exception, - int inProgress, + Set inProgress, int pending, List> errors ) implements Task.Status { public ReindexDataStreamStatus { + Objects.requireNonNull(inProgress); Objects.requireNonNull(errors); } @@ -40,7 +42,7 @@ public ReindexDataStreamStatus(StreamInput in) throws IOException { in.readInt(), in.readBoolean(), in.readException(), - in.readInt(), + in.readCollectionAsSet((Reader) StreamInput::readString), in.readInt(), in.readCollectionAsList(in1 -> Tuple.tuple(in1.readString(), in1.readException())) ); @@ -58,7 +60,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeInt(totalIndicesToBeUpgraded); out.writeBoolean(complete); out.writeException(exception); - out.writeInt(inProgress); + out.writeStringCollection(inProgress); out.writeInt(pending); out.writeCollection(errors, (out1, tuple) -> { out1.writeString(tuple.v1()); @@ -69,12 +71,13 @@ public void writeTo(StreamOutput out) throws IOException { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); - builder.field("start_time", persistentTaskStartTime); + builder.timestampFieldsFromUnixEpochMillis("start_time_millis", "start_time", persistentTaskStartTime); builder.field("complete", complete); - builder.field("total_indices", totalIndices); + builder.field("total_indices_in_data_stream", totalIndices); 
builder.field("total_indices_requiring_upgrade", totalIndicesToBeUpgraded); - builder.field("successes", totalIndicesToBeUpgraded - (inProgress + pending + errors.size())); - builder.field("in_progress", inProgress); + final int inProgressSize = inProgress.size(); + builder.field("successes", totalIndicesToBeUpgraded - (inProgressSize + pending + errors.size())); + builder.field("in_progress", inProgressSize); builder.field("pending", pending); builder.startArray("errors"); for (Tuple error : errors) { diff --git a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamTask.java b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamTask.java index 844f24f45ab77..7a2b759dfd17a 100644 --- a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamTask.java +++ b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamTask.java @@ -15,8 +15,11 @@ import org.elasticsearch.threadpool.ThreadPool; import java.util.ArrayList; +import java.util.Collections; +import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; public class ReindexDataStreamTask extends AllocatedPersistentTask { @@ -26,9 +29,9 @@ public class ReindexDataStreamTask extends AllocatedPersistentTask { private final int totalIndicesToBeUpgraded; private volatile boolean complete = false; private volatile Exception exception; - private final AtomicInteger inProgress = new AtomicInteger(0); + private final Set inProgress = Collections.synchronizedSet(new HashSet<>()); private final AtomicInteger pending = new AtomicInteger(); - private final List> errors = new ArrayList<>(); + private final List> errors = Collections.synchronizedList(new ArrayList<>()); private final RunOnce completeTask; @SuppressWarnings("this-escape") @@ -64,7 +67,7 @@ public ReindexDataStreamStatus getStatus() { totalIndicesToBeUpgraded, complete, exception, - inProgress.get(), + inProgress, pending.get(), errors ); @@ -84,17 +87,17 @@ public void taskFailed(ThreadPool threadPool, TimeValue timeToLive, Exception e) allReindexesCompleted(threadPool, timeToLive); } - public void reindexSucceeded() { - inProgress.decrementAndGet(); + public void reindexSucceeded(String index) { + inProgress.remove(index); } public void reindexFailed(String index, Exception error) { this.errors.add(Tuple.tuple(index, error)); - inProgress.decrementAndGet(); + inProgress.remove(index); } - public void incrementInProgressIndicesCount() { - inProgress.incrementAndGet(); + public void incrementInProgressIndicesCount(String index) { + inProgress.add(index); pending.decrementAndGet(); } diff --git a/x-pack/plugin/migrate/src/test/java/org/elasticsearch/xpack/migrate/action/GetMigrationReindexStatusActionResponseTests.java b/x-pack/plugin/migrate/src/test/java/org/elasticsearch/xpack/migrate/action/GetMigrationReindexStatusActionResponseTests.java index a18030edbf42c..1361f30840c87 100644 --- a/x-pack/plugin/migrate/src/test/java/org/elasticsearch/xpack/migrate/action/GetMigrationReindexStatusActionResponseTests.java +++ b/x-pack/plugin/migrate/src/test/java/org/elasticsearch/xpack/migrate/action/GetMigrationReindexStatusActionResponseTests.java @@ -7,25 +7,17 @@ package org.elasticsearch.xpack.migrate.action; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import 
org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.network.NetworkModule; -import org.elasticsearch.tasks.RawTaskStatus; -import org.elasticsearch.tasks.Task; -import org.elasticsearch.tasks.TaskId; -import org.elasticsearch.tasks.TaskInfo; -import org.elasticsearch.tasks.TaskResult; +import org.elasticsearch.core.Tuple; import org.elasticsearch.test.AbstractWireSerializingTestCase; -import org.elasticsearch.xcontent.ToXContent; -import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.migrate.action.GetMigrationReindexStatusAction.Response; +import org.elasticsearch.xpack.migrate.task.ReindexDataStreamEnrichedStatus; import java.io.IOException; -import java.util.Collections; +import java.util.List; import java.util.Map; -import java.util.TreeMap; public class GetMigrationReindexStatusActionResponseTests extends AbstractWireSerializingTestCase { @Override @@ -35,11 +27,7 @@ protected Writeable.Reader instanceReader() { @Override protected Response createTestInstance() { - try { - return new Response(randomTaskResult()); - } catch (IOException e) { - throw new RuntimeException(e); - } + return new Response(getRandomStatus()); } @Override @@ -47,76 +35,44 @@ protected Response mutateInstance(Response instance) throws IOException { return createTestInstance(); // There's only one field } - private static TaskResult randomTaskResult() throws IOException { - return switch (between(0, 2)) { - case 0 -> new TaskResult(randomBoolean(), randomTaskInfo()); - case 1 -> new TaskResult(randomTaskInfo(), new RuntimeException("error")); - case 2 -> new TaskResult(randomTaskInfo(), randomTaskResponse()); - default -> throw new UnsupportedOperationException("Unsupported random TaskResult constructor"); - }; - } - - static TaskInfo randomTaskInfo() { - String nodeId = randomAlphaOfLength(5); - TaskId taskId = randomTaskId(nodeId); - String type = randomAlphaOfLength(5); - String action = randomAlphaOfLength(5); - Task.Status status = randomBoolean() ? randomRawTaskStatus() : null; - String description = randomBoolean() ? randomAlphaOfLength(5) : null; - long startTime = randomLong(); - long runningTimeNanos = randomNonNegativeLong(); - boolean cancellable = randomBoolean(); - boolean cancelled = cancellable && randomBoolean(); - TaskId parentTaskId = randomBoolean() ? TaskId.EMPTY_TASK_ID : randomTaskId(randomAlphaOfLength(5)); - Map headers = randomBoolean() - ? 
Collections.emptyMap() - : Collections.singletonMap(randomAlphaOfLength(5), randomAlphaOfLength(5)); - return new TaskInfo( - taskId, - type, - nodeId, - action, - description, - status, - startTime, - runningTimeNanos, - cancellable, - cancelled, - parentTaskId, - headers + private ReindexDataStreamEnrichedStatus getRandomStatus() { + return new ReindexDataStreamEnrichedStatus( + randomLong(), + randomNegativeInt(), + randomNegativeInt(), + randomBoolean(), + nullableTestException(), + randomInProgressMap(), + randomNegativeInt(), + randomErrorList() ); } - private static TaskId randomTaskId(String nodeId) { - return new TaskId(nodeId, randomLong()); + private Map> randomInProgressMap() { + return randomMap(1, 50, () -> Tuple.tuple(randomAlphaOfLength(50), Tuple.tuple(randomNonNegativeLong(), randomNonNegativeLong()))); } - private static RawTaskStatus randomRawTaskStatus() { - try (XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent())) { - builder.startObject(); - int fields = between(0, 10); - for (int f = 0; f < fields; f++) { - builder.field(randomAlphaOfLength(5), randomAlphaOfLength(5)); - } - builder.endObject(); - return new RawTaskStatus(BytesReference.bytes(builder)); - } catch (IOException e) { - throw new IllegalStateException(e); + private Exception nullableTestException() { + if (randomBoolean()) { + return testException(); } + return null; } - private static ToXContent randomTaskResponse() { - Map result = new TreeMap<>(); - int fields = between(0, 10); - for (int f = 0; f < fields; f++) { - result.put(randomAlphaOfLength(5), randomAlphaOfLength(5)); - } - return (builder, params) -> { - for (Map.Entry entry : result.entrySet()) { - builder.field(entry.getKey(), entry.getValue()); - } - return builder; - }; + private Exception testException() { + /* + * Unfortunately ElasticsearchException doesn't have an equals and just falls back to Object::equals. So we can't test for equality + * when we're using an exception. So always just use null. + */ + return null; + } + + private List> randomErrorList() { + return randomErrorList(0); + } + + private List> randomErrorList(int minSize) { + return randomList(minSize, Math.max(minSize, 100), () -> Tuple.tuple(randomAlphaOfLength(30), testException())); } @Override diff --git a/x-pack/plugin/migrate/src/test/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamEnrichedStatusTests.java b/x-pack/plugin/migrate/src/test/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamEnrichedStatusTests.java new file mode 100644 index 0000000000000..993db1096aac2 --- /dev/null +++ b/x-pack/plugin/migrate/src/test/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamEnrichedStatusTests.java @@ -0,0 +1,165 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.migrate.task; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.core.Tuple; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.json.JsonXContent; + +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import static java.util.Map.entry; +import static org.elasticsearch.xcontent.ToXContent.EMPTY_PARAMS; +import static org.hamcrest.Matchers.equalTo; + +public class ReindexDataStreamEnrichedStatusTests extends AbstractWireSerializingTestCase { + + @Override + protected Writeable.Reader instanceReader() { + return ReindexDataStreamEnrichedStatus::new; + } + + @Override + protected ReindexDataStreamEnrichedStatus createTestInstance() { + return new ReindexDataStreamEnrichedStatus( + randomLong(), + randomNegativeInt(), + randomNegativeInt(), + randomBoolean(), + nullableTestException(), + randomInProgressMap(), + randomNegativeInt(), + randomErrorList() + ); + } + + private Map> randomInProgressMap() { + return randomMap(1, 50, () -> Tuple.tuple(randomAlphaOfLength(50), Tuple.tuple(randomNonNegativeLong(), randomNonNegativeLong()))); + } + + private Exception nullableTestException() { + if (randomBoolean()) { + return testException(); + } + return null; + } + + private Exception testException() { + /* + * Unfortunately ElasticsearchException doesn't have an equals and just falls back to Object::equals. So we can't test for equality + * when we're using an exception. So always just use null. 
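+ * For example, new ElasticsearchException("boom").equals(new ElasticsearchException("boom")) is false, because equality falls
+ * back to reference identity.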
+ */ + return null; + } + + private List randomList() { + return randomList(0); + } + + private List randomList(int minSize) { + return randomList(minSize, Math.max(minSize, 100), () -> randomAlphaOfLength(50)); + } + + private Set randomSet(int minSize) { + return randomSet(minSize, Math.max(minSize, 100), () -> randomAlphaOfLength(50)); + } + + private List> randomErrorList() { + return randomErrorList(0); + } + + private List> randomErrorList(int minSize) { + return randomList(minSize, Math.max(minSize, 100), () -> Tuple.tuple(randomAlphaOfLength(30), testException())); + } + + @Override + protected ReindexDataStreamEnrichedStatus mutateInstance(ReindexDataStreamEnrichedStatus instance) throws IOException { + long startTime = instance.persistentTaskStartTime(); + int totalIndices = instance.totalIndices(); + int totalIndicesToBeUpgraded = instance.totalIndicesToBeUpgraded(); + boolean complete = instance.complete(); + Exception exception = instance.exception(); + Map> inProgress = instance.inProgress(); + int pending = instance.pending(); + List> errors = instance.errors(); + switch (randomIntBetween(0, 6)) { + case 0 -> startTime = randomLong(); + case 1 -> totalIndices = totalIndices + 1; + case 2 -> totalIndicesToBeUpgraded = totalIndicesToBeUpgraded + 1; + case 3 -> complete = complete == false; + case 4 -> inProgress = randomInProgressMap(); + case 5 -> pending = pending + 1; + case 6 -> errors = randomErrorList(errors.size() + 1); + default -> throw new UnsupportedOperationException(); + } + return new ReindexDataStreamEnrichedStatus( + startTime, + totalIndices, + totalIndicesToBeUpgraded, + complete, + exception, + inProgress, + pending, + errors + ); + } + + public void testToXContent() throws IOException { + ReindexDataStreamEnrichedStatus status = new ReindexDataStreamEnrichedStatus( + 1234L, + 200, + 100, + true, + new ElasticsearchException("the whole task failed"), + Map.of("index-1", Tuple.tuple(10L, 8L)), + 8, + List.of( + Tuple.tuple("index7", new ElasticsearchException("index7 failed")), + Tuple.tuple("index8", new ElasticsearchException("index8 " + "failed")) + ) + ); + try (XContentBuilder builder = XContentBuilder.builder(JsonXContent.jsonXContent)) { + builder.humanReadable(true); + status.toXContent(builder, EMPTY_PARAMS); + try (XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder))) { + Map parserMap = parser.map(); + assertThat( + parserMap, + equalTo( + Map.ofEntries( + entry("start_time", "1970-01-01T00:00:01.234Z"), + entry("start_time_millis", 1234), + entry("total_indices_in_data_stream", 200), + entry("total_indices_requiring_upgrade", 100), + entry("complete", true), + entry("exception", "the whole task failed"), + entry("successes", 89), + entry("in_progress", List.of(Map.of("index", "index-1", "total_doc_count", 10, "reindexed_doc_count", 8))), + entry("pending", 8), + entry( + "errors", + List.of( + Map.of("index", "index7", "message", "index7 failed"), + Map.of("index", "index8", "message", "index8 failed") + ) + ) + ) + ) + ); + } + } + } +} diff --git a/x-pack/plugin/migrate/src/test/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamStatusTests.java b/x-pack/plugin/migrate/src/test/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamStatusTests.java index d81e9d35cd490..ad47eb6a23cd7 100644 --- a/x-pack/plugin/migrate/src/test/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamStatusTests.java +++ 
b/x-pack/plugin/migrate/src/test/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamStatusTests.java @@ -19,6 +19,7 @@ import java.io.IOException; import java.util.List; import java.util.Map; +import java.util.Set; import static java.util.Map.entry; import static org.elasticsearch.xcontent.ToXContent.EMPTY_PARAMS; @@ -39,7 +40,7 @@ protected ReindexDataStreamStatus createTestInstance() { randomNegativeInt(), randomBoolean(), nullableTestException(), - randomNegativeInt(), + randomSet(0), randomNegativeInt(), randomErrorList() ); @@ -68,6 +69,10 @@ private List randomList(int minSize) { return randomList(minSize, Math.max(minSize, 100), () -> randomAlphaOfLength(50)); } + private Set randomSet(int minSize) { + return randomSet(minSize, Math.max(minSize, 100), () -> randomAlphaOfLength(50)); + } + private List> randomErrorList() { return randomErrorList(0); } @@ -83,7 +88,7 @@ protected ReindexDataStreamStatus mutateInstance(ReindexDataStreamStatus instanc int totalIndicesToBeUpgraded = instance.totalIndicesToBeUpgraded(); boolean complete = instance.complete(); Exception exception = instance.exception(); - int inProgress = instance.inProgress(); + Set inProgress = instance.inProgress(); int pending = instance.pending(); List> errors = instance.errors(); switch (randomIntBetween(0, 6)) { @@ -91,7 +96,7 @@ protected ReindexDataStreamStatus mutateInstance(ReindexDataStreamStatus instanc case 1 -> totalIndices = totalIndices + 1; case 2 -> totalIndicesToBeUpgraded = totalIndicesToBeUpgraded + 1; case 3 -> complete = complete == false; - case 4 -> inProgress = inProgress + 1; + case 4 -> inProgress = randomSet(inProgress.size() + 1); case 5 -> pending = pending + 1; case 6 -> errors = randomErrorList(errors.size() + 1); default -> throw new UnsupportedOperationException(); @@ -115,7 +120,7 @@ public void testToXContent() throws IOException { 100, true, new ElasticsearchException("the whole task failed"), - 12, + randomSet(12, 12, () -> randomAlphaOfLength(50)), 8, List.of( Tuple.tuple("index7", new ElasticsearchException("index7 failed")), @@ -131,8 +136,9 @@ public void testToXContent() throws IOException { parserMap, equalTo( Map.ofEntries( - entry("start_time", 1234), - entry("total_indices", 200), + entry("start_time", "1970-01-01T00:00:01.234Z"), + entry("start_time_millis", 1234), + entry("total_indices_in_data_stream", 200), entry("total_indices_requiring_upgrade", 100), entry("complete", true), entry("exception", "the whole task failed"), diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobRenormalizedResultsPersister.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobRenormalizedResultsPersister.java index 3c0d2aca4deda..3c82841f1b99e 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobRenormalizedResultsPersister.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobRenormalizedResultsPersister.java @@ -8,10 +8,12 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.client.internal.Client; +import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.xcontent.ToXContent; import 
org.elasticsearch.xcontent.XContentBuilder; @@ -102,7 +104,29 @@ public void executeRequest() { try (ThreadContext.StoredContext ignore = client.threadPool().getThreadContext().stashWithOrigin(ML_ORIGIN)) { BulkResponse addRecordsResponse = client.bulk(bulkRequest).actionGet(); if (addRecordsResponse.hasFailures()) { - logger.error("[{}] Bulk index of results has errors: {}", jobId, addRecordsResponse.buildFailureMessage()); + // Implementation note: Ignore the failures from writing to the read-only index, as they come + // from changing the index format version. + boolean hasNonReadOnlyFailures = false; + for (BulkItemResponse response : addRecordsResponse.getItems()) { + if (response.isFailed() == false) { + continue; + } + if (response.getFailureMessage().contains(IndexMetadata.INDEX_READ_ONLY_BLOCK.description())) { + // We expect this to happen when the old index is made read-only and being reindexed + logger.debug( + "[{}] Ignoring failure to write renormalized results to a read-only index [{}]: {}", + jobId, + response.getFailure().getIndex(), + response.getFailureMessage() + ); + } else { + hasNonReadOnlyFailures = true; + break; + } + } + if (hasNonReadOnlyFailures) { + logger.error("[{}] Bulk index of results has errors: {}", jobId, addRecordsResponse.buildFailureMessage()); + } } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/LocalStateMachineLearning.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/LocalStateMachineLearning.java index ff1a1d19779df..bab012afc3101 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/LocalStateMachineLearning.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/LocalStateMachineLearning.java @@ -27,7 +27,6 @@ import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; import org.elasticsearch.xpack.core.rollup.action.GetRollupIndexCapsAction; import org.elasticsearch.xpack.core.ssl.SSLService; -import org.elasticsearch.xpack.inference.InferencePlugin; import org.elasticsearch.xpack.monitoring.Monitoring; import org.elasticsearch.xpack.security.Security; @@ -87,12 +86,6 @@ protected XPackLicenseState getLicenseState() { } }); plugins.add(new MockedRollupPlugin()); - plugins.add(new InferencePlugin(settings) { - @Override - protected SSLService getSslService() { - return thisVar.getSslService(); - } - }); } @Override diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/support/BaseMlIntegTestCase.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/support/BaseMlIntegTestCase.java index 5cf15454e47f2..aeebfabdce704 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/support/BaseMlIntegTestCase.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/support/BaseMlIntegTestCase.java @@ -82,6 +82,7 @@ import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.DataCounts; import org.elasticsearch.xpack.core.ml.utils.MlTaskState; import org.elasticsearch.xpack.ilm.IndexLifecycle; +import org.elasticsearch.xpack.inference.InferencePlugin; import org.elasticsearch.xpack.ml.LocalStateMachineLearning; import org.elasticsearch.xpack.ml.MachineLearning; import org.elasticsearch.xpack.ml.MlSingleNodeTestCase; @@ -160,7 +161,8 @@ protected Collection> nodePlugins() { DataStreamsPlugin.class, // To remove errors from parsing built-in templates that contain scaled_float MapperExtrasPlugin.class, - Wildcard.class + Wildcard.class, + InferencePlugin.class ); } diff --git
a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java index f4d50df4ff613..35da4abec223a 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java @@ -462,6 +462,7 @@ public void testToXContent() throws IOException { pluginEsBuildVersion, Version.CURRENT, IndexVersions.MINIMUM_COMPATIBLE, + IndexVersions.MINIMUM_READONLY_COMPATIBLE, IndexVersion.current(), apmIndicesExist }; final String expectedJson = """ @@ -817,6 +818,7 @@ public void testToXContent() throws IOException { ], "version": "%s", "min_index_version":%s, + "min_read_only_index_version":%s, "max_index_version":%s } }, diff --git a/x-pack/plugin/security/qa/consistency-checks/src/test/java/org/elasticsearch/xpack/security/CrossClusterShardTests.java b/x-pack/plugin/security/qa/consistency-checks/src/test/java/org/elasticsearch/xpack/security/CrossClusterShardTests.java index 057ebdece5c61..ab5be0f48f5f3 100644 --- a/x-pack/plugin/security/qa/consistency-checks/src/test/java/org/elasticsearch/xpack/security/CrossClusterShardTests.java +++ b/x-pack/plugin/security/qa/consistency-checks/src/test/java/org/elasticsearch/xpack/security/CrossClusterShardTests.java @@ -35,6 +35,7 @@ import org.elasticsearch.xpack.frozen.FrozenIndices; import org.elasticsearch.xpack.graph.Graph; import org.elasticsearch.xpack.ilm.IndexLifecycle; +import org.elasticsearch.xpack.inference.InferencePlugin; import org.elasticsearch.xpack.profiling.ProfilingPlugin; import org.elasticsearch.xpack.rollup.Rollup; import org.elasticsearch.xpack.search.AsyncSearch; @@ -88,6 +89,7 @@ protected Collection> getPlugins() { FrozenIndices.class, Graph.class, IndexLifecycle.class, + InferencePlugin.class, IngestCommonPlugin.class, IngestTestPlugin.class, MustachePlugin.class, diff --git a/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java b/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java index c07e4b2c541a2..1cb73de4646cc 100644 --- a/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java +++ b/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java @@ -641,6 +641,7 @@ public class Constants { new FeatureFlag("reindex_data_stream").isEnabled() ? "indices:admin/data_stream/index/reindex" : null, new FeatureFlag("reindex_data_stream").isEnabled() ? "indices:admin/data_stream/reindex" : null, new FeatureFlag("reindex_data_stream").isEnabled() ? "indices:admin/data_stream/reindex_cancel" : null, + new FeatureFlag("reindex_data_stream").isEnabled() ? 
"indices:admin/index/create_from_source" : null, "internal:admin/repository/verify", "internal:admin/repository/verify/coordinate" ).filter(Objects::nonNull).collect(Collectors.toUnmodifiableSet()); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/PoolingSessionFactory.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/PoolingSessionFactory.java index 4f87ac27be141..f31d5647b4cc5 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/PoolingSessionFactory.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/PoolingSessionFactory.java @@ -16,7 +16,6 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.common.logging.DeprecationCategory; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; @@ -74,7 +73,7 @@ abstract class PoolingSessionFactory extends SessionFactory implements Releasabl super(config, sslService, threadPool); this.groupResolver = groupResolver; this.bindDn = bindDn; - this.bindRequest = new AtomicReference<>(buildBindRequest(config.settings(), false)); + this.bindRequest = new AtomicReference<>(buildBindRequest(config.settings())); this.useConnectionPool = config.getSetting(poolingEnabled); if (useConnectionPool) { this.connectionPool = createConnectionPool(config, serverSet, timeout, logger, bindRequest.get(), healthCheckDNSupplier); @@ -93,11 +92,9 @@ abstract class PoolingSessionFactory extends SessionFactory implements Releasabl * will perform a setting consistency validation and throw {@link SettingsException} in case of violation. * Due to legacy reasons and BWC, when {@code reloadRequest} is se to {@code false}, this method will only log a warning message. * - * @param reloadRequest {@code true} if this method is called during reloading of secure settings, - * {@code false} if it is called during bootstrapping. * @return A new {@link SimpleBindRequest} that contains configured bind DN and password. */ - private SimpleBindRequest buildBindRequest(Settings settings, boolean reloadRequest) { + private SimpleBindRequest buildBindRequest(Settings settings) { final byte[] bindPassword; final Setting legacyPasswordSetting = config.getConcreteSetting(LEGACY_BIND_PASSWORD); final Setting securePasswordSetting = config.getConcreteSetting(SECURE_BIND_PASSWORD); @@ -119,27 +116,13 @@ private SimpleBindRequest buildBindRequest(Settings settings, boolean reloadRequ return new SimpleBindRequest(); } else { if (bindPassword == null) { - if (reloadRequest) { - throw new SettingsException( - "[{}] is set but no bind password is specified. Without a corresponding bind password, " - + "all {} realm authentication will fail. Specify a bind password via [{}].", - RealmSettings.getFullSettingKey(config, PoolingSessionFactorySettings.BIND_DN), - config.type(), - RealmSettings.getFullSettingKey(config, SECURE_BIND_PASSWORD) - ); - } else { - deprecationLogger.critical( - DeprecationCategory.SECURITY, - "bind_dn_set_without_password", - "[{}] is set but no bind password is specified. Without a corresponding bind password, " - + "all {} realm authentication will fail. Specify a bind password via [{}] or [{}]. 
" - + "In the next major release, nodes with incomplete bind credentials will fail to start.", - RealmSettings.getFullSettingKey(config, PoolingSessionFactorySettings.BIND_DN), - config.type(), - RealmSettings.getFullSettingKey(config, SECURE_BIND_PASSWORD), - RealmSettings.getFullSettingKey(config, LEGACY_BIND_PASSWORD) - ); - } + throw new SettingsException( + "[{}] is set but no bind password is specified. Without a corresponding bind password, " + + "all {} realm authentication will fail. Specify a bind password via [{}].", + RealmSettings.getFullSettingKey(config, PoolingSessionFactorySettings.BIND_DN), + config.type(), + RealmSettings.getFullSettingKey(config, SECURE_BIND_PASSWORD) + ); } return new SimpleBindRequest(this.bindDn, bindPassword); } @@ -148,7 +131,7 @@ private SimpleBindRequest buildBindRequest(Settings settings, boolean reloadRequ @Override public void reload(Settings settings) { final SimpleBindRequest oldRequest = bindRequest.get(); - final SimpleBindRequest newRequest = buildBindRequest(settings, true); + final SimpleBindRequest newRequest = buildBindRequest(settings); if (bindRequestEquals(newRequest, oldRequest) == false) { if (bindRequest.compareAndSet(oldRequest, newRequest)) { if (connectionPool != null) { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapUserSearchSessionFactoryTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapUserSearchSessionFactoryTests.java index acb4359b37323..5482d7711031e 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapUserSearchSessionFactoryTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapUserSearchSessionFactoryTests.java @@ -21,6 +21,7 @@ import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsException; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.TimeValue; import org.elasticsearch.env.TestEnvironment; @@ -45,7 +46,6 @@ import java.util.Locale; import static org.elasticsearch.xpack.core.security.authc.RealmSettings.getFullSettingKey; -import static org.elasticsearch.xpack.core.security.authc.ldap.PoolingSessionFactorySettings.BIND_DN; import static org.elasticsearch.xpack.core.security.authc.ldap.PoolingSessionFactorySettings.LEGACY_BIND_PASSWORD; import static org.elasticsearch.xpack.core.security.authc.ldap.PoolingSessionFactorySettings.SECURE_BIND_PASSWORD; import static org.hamcrest.Matchers.containsString; @@ -199,7 +199,7 @@ public void testUserSearchBaseScopeFailsWithWrongBaseDN() throws Exception { assertDeprecationWarnings(config.identifier(), useAttribute, useLegacyBindPassword); } - public void testConstructorLogsErrorIfBindDnSetWithoutPassword() throws Exception { + public void testConstructorThrowsIfBindDnSetWithoutPassword() throws Exception { String groupSearchBase = "o=sevenSeas"; String userSearchBase = "cn=William Bush,ou=people,o=sevenSeas"; @@ -216,19 +216,18 @@ public void testConstructorLogsErrorIfBindDnSetWithoutPassword() throws Exceptio new ThreadContext(globalSettings) ); - try (LdapUserSearchSessionFactory ignored = getLdapUserSearchSessionFactory(config, sslService, threadPool)) { - assertCriticalWarnings( - String.format( - Locale.ROOT, - "[%s] is set but no bind password is specified. 
diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/180_match_operator.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/180_match_operator.yml
index 663c0dc78acb3..118783b412d48 100644
--- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/180_match_operator.yml
+++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/180_match_operator.yml
@@ -170,7 +170,7 @@ setup:
   - match: { error.reason: "Found 1 problem\nline 1:36: Unknown column [content], did you mean [count(*)]?" }
 
 ---
-"match with functions":
+"match with disjunctions":
   - do:
       catch: bad_request
       allowed_warnings_regex:
@@ -181,7 +181,20 @@ setup:
 
   - match: { status: 400 }
   - match: { error.type: verification_exception }
-  - match: { error.reason: "Found 1 problem\nline 1:19: Invalid condition [content:\"fox\" OR to_upper(content) == \"FOX\"]. [:] operator can't be used as part of an or condition" }
+  - match: { error.reason: "/.+Invalid\\ condition\\ \\[content\\:\"fox\"\\ OR\\ to_upper\\(content\\)\\ ==\\ \"FOX\"\\]\\./" }
+
+  - do:
+      catch: bad_request
+      allowed_warnings_regex:
+        - "No limit defined, adding default limit of \\[.*\\]"
+      esql.query:
+        body:
+          query: 'FROM test | WHERE content:"fox" OR to_upper(content) == "FOX"'
+
+  - match: { status: 400 }
+  - match: { error.type: verification_exception }
+  - match: { error.reason: "/.+Invalid\\ condition\\ \\[content\\:\"fox\"\\ OR\\ to_upper\\(content\\)\\ ==\\ \"FOX\"\\]\\./" }
+
 ---
 "match within eval":
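Note: the assertion above switches from pinning the exact error string to a /regex/ match on its "Invalid condition [...]" core, so the test survives wording changes after that point. A small Java sketch of how such a pattern matches the old message; the backslash-escaped spaces suggest the yaml test framework compiles these patterns with Pattern.COMMENTS (where unescaped whitespace is ignored), but that flag is an assumption here, not something this patch states.

import java.util.regex.Pattern;

public class ErrorReasonRegexDemo {
    public static void main(String[] args) {
        // An error reason in the shape the old exact-string assertion pinned down.
        String reason = "Found 1 problem\nline 1:19: Invalid condition "
            + "[content:\"fox\" OR to_upper(content) == \"FOX\"]. "
            + "[:] operator can't be used as part of an or condition";

        // COMMENTS makes unescaped whitespace insignificant, hence the escaped spaces;
        // DOTALL lets ".+" span the newline in the reason.
        Pattern p = Pattern.compile(
            ".+Invalid\\ condition\\ \\[content:\"fox\"\\ OR\\ to_upper\\(content\\)\\ ==\\ \"FOX\"\\]\\.",
            Pattern.COMMENTS | Pattern.DOTALL
        );
        System.out.println(p.matcher(reason).find()); // prints: true
    }
}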
diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/190_lookup_join.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/190_lookup_join.yml
new file mode 100644
index 0000000000000..fdb6746bbeed8
--- /dev/null
+++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/190_lookup_join.yml
@@ -0,0 +1,78 @@
+---
+setup:
+  - requires:
+      test_runner_features: [capabilities, contains]
+      capabilities:
+        - method: POST
+          path: /_query
+          parameters: []
+          capabilities: [join_lookup_v8]
+      reason: "uses LOOKUP JOIN"
+  - do:
+      indices.create:
+        index: test
+        body:
+          settings:
+            number_of_shards: 5
+          mappings:
+            properties:
+              key:
+                type: long
+              color:
+                type: keyword
+  - do:
+      indices.create:
+        index: test-lookup
+        body:
+          settings:
+            index:
+              mode: lookup
+          mappings:
+            properties:
+              key:
+                type: long
+              color:
+                type: keyword
+  - do:
+      bulk:
+        index: "test"
+        refresh: true
+        body:
+          - { "index": { } }
+          - { "key": 1, "color": "red" }
+          - { "index": { } }
+          - { "key": 2, "color": "blue" }
+  - do:
+      bulk:
+        index: "test-lookup"
+        refresh: true
+        body:
+          - { "index": { } }
+          - { "key": 1, "color": "cyan" }
+          - { "index": { } }
+          - { "key": 2, "color": "yellow" }
+
+---
+basic:
+  - do:
+      esql.query:
+        body:
+          query: 'FROM test | SORT key | LOOKUP JOIN `test-lookup` ON key | LIMIT 3'
+
+  - match: {columns.0.name: "key"}
+  - match: {columns.0.type: "long"}
+  - match: {columns.1.name: "color"}
+  - match: {columns.1.type: "keyword"}
+  - match: {values.0: [1, "cyan"]}
+  - match: {values.1: [2, "yellow"]}
+
+---
+non-lookup index:
+  - do:
+      esql.query:
+        body:
+          query: 'FROM test-lookup | SORT key | LOOKUP JOIN `test` ON key | LIMIT 3'
+      catch: "bad_request"
+
+  - match: { error.type: "verification_exception" }
+  - contains: { error.reason: "Found 1 problem\nline 1:43: invalid [test] resolution in lookup mode to an index in [standard] mode" }
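Note: the new yaml suite above exercises LOOKUP JOIN end to end against an index created with index.mode: lookup. Conceptually the command behaves like a hash join on the key: each left row is probed against the lookup index, and a matching lookup column shadows the left side's same-named column, which is why the test expects "cyan"/"yellow" rather than "red"/"blue". A plain-Java sketch of that semantics; this is an illustration of the expected results, not the engine's implementation, and the null-on-no-match behavior is my reading of left-join semantics rather than something this test asserts.

import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class LookupJoinSketch {

    record Row(long key, String color) {}

    public static void main(String[] args) {
        List<Row> test = List.of(new Row(1, "red"), new Row(2, "blue"));
        List<Row> lookup = List.of(new Row(1, "cyan"), new Row(2, "yellow"));

        // Hash the lookup side on the join key, then probe with each left row.
        Map<Long, Row> byKey = new HashMap<>();
        for (Row r : lookup) {
            byKey.put(r.key(), r);
        }
        for (Row left : test) {
            Row match = byKey.get(left.key());
            // A matching lookup row shadows the left row's same-named column;
            // an unmatched key would keep a null color (left-join behavior).
            String color = match != null ? match.color() : null;
            System.out.println(left.key() + " -> " + color); // 1 -> cyan, 2 -> yellow
        }
    }
}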
-      - length: {esql.functions: 129} # check the "sister" test below for a likely update to the same esql.functions length check
+      - length: {esql.functions: 130} # check the "sister" test below for a likely update to the same esql.functions length check
 
 ---
 "Basic ESQL usage output (telemetry) non-snapshot version":
@@ -163,4 +163,4 @@ setup:
       - match: {esql.functions.cos: $functions_cos}
       - gt: {esql.functions.to_long: $functions_to_long}
       - match: {esql.functions.coalesce: $functions_coalesce}
-      - length: {esql.functions: 125} # check the "sister" test above for a likely update to the same esql.functions length check
+      - length: {esql.functions: 126} # check the "sister" test above for a likely update to the same esql.functions length check
diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/migrate/20_reindex_status.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/migrate/20_reindex_status.yml
index c94ce8dd211ae..80ca95d631491 100644
--- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/migrate/20_reindex_status.yml
+++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/migrate/20_reindex_status.yml
@@ -61,10 +61,10 @@ setup:
       migrate.get_reindex_status:
         index: "my-data-stream"
 
   - match: { complete: true }
-  - match: { total_indices: 1 }
+  - match: { total_indices_in_data_stream: 1 }
   - match: { total_indices_requiring_upgrade: 0 }
   - match: { successes: 0 }
-  - match: { in_progress: 0 }
+  - match: { in_progress: [] }
   - match: { pending: 0 }
   - match: { errors: [] }
diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/sparse_vector_search.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/sparse_vector_search.yml
index 332981a580802..3481773b0bab3 100644
--- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/sparse_vector_search.yml
+++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/sparse_vector_search.yml
@@ -268,7 +268,7 @@ setup:
   - match: { hits.hits.0._score: 0.25 }
 
 ---
-"Test sparse_vector requires one of inference_id or query_vector":
+"Test sparse_vector requires one of query or query_vector":
   - do:
       catch: /\[sparse_vector\] requires one of \[query_vector\] or \[inference_id\]/
       search:
@@ -281,7 +281,41 @@ setup:
   - match: { status: 400 }
 
 ---
-"Test sparse_vector only allows one of inference_id or query_vector":
+"Test sparse_vector returns an error if inference ID not specified with query":
+  - do:
+      catch: bad_request # This is for BWC, the actual error message is tested in a subsequent test
+      search:
+        index: index-with-sparse-vector
+        body:
+          query:
+            sparse_vector:
+              field: text
+              query: "octopus comforter smells"
+
+  - match: { status: 400 }
+
+---
+"Test sparse_vector requires an inference ID to be specified on sparse_vector fields":
+  - requires:
+      cluster_features: [ "search.semantic_sparse_vector_query_rewrite_interception_supported" ]
+      reason: "Error message changed in 8.18"
+  - do:
+      catch: /inference_id required to perform vector search on query string/
+      search:
+        index: index-with-sparse-vector
+        body:
+          query:
+            sparse_vector:
+              field: text
+              query: "octopus comforter smells"
+
+  - match: { status: 400 }
+
+---
+"Test sparse_vector only allows one of query or query_vector (note the error message is misleading)":
+  - requires:
+      cluster_features: [ "search.semantic_sparse_vector_query_rewrite_interception_supported" ]
+      reason: "sparse vector inference checks updated in 8.18 to support sparse_vector on semantic_text fields"
   - do:
       catch: /\[sparse_vector\] requires one of \[query_vector\] or \[inference_id\]/
       search:
@@ -290,7 +324,7 @@ setup:
           query:
             sparse_vector:
               field: text
-              inference_id: text_expansion_model
+              query: "octopus comforter smells"
               query_vector:
                 the: 1.0
                 comforter: 1.0
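Note: the query_vector form of sparse_vector exercised above scores documents against explicit token weights, conceptually a dot product between the query's token weights and the document's stored term weights. A toy sketch of that scoring under stated assumptions: the real query maps onto Lucene feature queries, so actual score computation differs, and the weights below are taken from the test's query_vector shape rather than real data.

import java.util.Map;

public class SparseVectorScoreSketch {

    // Dot product of the query's token weights and a document's stored weights;
    // tokens missing from either side contribute nothing to the score.
    static double score(Map<String, Double> queryVector, Map<String, Double> docWeights) {
        double sum = 0.0;
        for (Map.Entry<String, Double> e : queryVector.entrySet()) {
            sum += e.getValue() * docWeights.getOrDefault(e.getKey(), 0.0);
        }
        return sum;
    }

    public static void main(String[] args) {
        // Token weights in the shape of the test's query_vector.
        Map<String, Double> queryVector = Map.of("the", 1.0, "comforter", 1.0);
        Map<String, Double> docWeights = Map.of("comforter", 0.25);
        System.out.println(score(queryVector, docWeights)); // prints: 0.25
    }
}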